path: root/drivers/gpu
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/Makefile | 8
-rw-r--r--  drivers/gpu/drm/armada/armada_510.c | 23
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 187
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 11
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 13
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 245
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_output.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 16
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_fbdev.c | 7
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 17
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/ptn3460.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 14
-rw-r--r--  drivers/gpu/drm/drm_buffer.c | 6
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 17
-rw-r--r--  drivers/gpu/drm/drm_context.c | 102
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 604
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 182
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 2715
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 1190
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 11
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 115
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 85
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 29
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_info.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 372
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 51
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 1
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 14
-rw-r--r--  drivers/gpu/drm/drm_of.c | 67
-rw-r--r--  drivers/gpu/drm/drm_plane_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 9
-rw-r--r--  drivers/gpu/drm/drm_rect.c | 140
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 805
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 92
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 117
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_connector.c | 21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 15
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.h | 7
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 285
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 277
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 29
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 259
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 57
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 4
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 390
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 425
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 74
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 189
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 93
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 149
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 308
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 55
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 161
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 104
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 149
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 624
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 500
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 57
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 474
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1459
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 678
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 548
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 154
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 28
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 102
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 160
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 897
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen6.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen7.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_renderstate_gen8.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 444
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 212
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 16
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 58
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 296
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 7
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 56
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 239
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 69
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 109
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 27
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 377
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 25
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 431
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 159
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 25
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 91
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 49
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 17
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 12
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 31
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 33
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c | 21
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/panel/panel-ld9040.c | 21
-rw-r--r--  drivers/gpu/drm/panel/panel-s6e8aa0.c | 29
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 203
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/ci_smc.c | 39
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 722
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 247
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/ni_dma.c | 178
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 127
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 79
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 77
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 83
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 388
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 58
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ib.c | 319
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 63
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 287
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.c | 167
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ucode.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 278
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 460
-rw-r--r--  drivers/gpu/drm/radeon/si_dma.c | 172
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 152
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/si_smc.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/sislands_smc.h | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 4
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sti/Kconfig | 14
-rw-r--r--  drivers/gpu/drm/sti/Makefile | 21
-rw-r--r--  drivers/gpu/drm/sti/NOTES | 58
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 281
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.h | 90
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_crtc.c | 421
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_crtc.h | 22
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_drv.c | 241
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_drv.h | 29
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.c | 195
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.h | 18
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 549
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.h | 16
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 794
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 810
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.h | 88
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c | 336
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c | 211
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h | 14
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.c | 197
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.h | 123
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c | 249
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.h | 54
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 648
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.c | 138
-rw-r--r--  drivers/gpu/drm/sti/sti_vid.h | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_vtac.c | 215
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 366
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.h | 28
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 123
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 5
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 216
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 11
-rw-r--r--  drivers/gpu/drm/tegra/dsi.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 66
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 5
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 16
-rw-r--r--  drivers/gpu/drm/tegra/gr2d.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/gr3d.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/sor.c | 21
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 15
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.h | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 41
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 29
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 37
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 34
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 13
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 5
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 15
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 341
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 74
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 227
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 396
-rw-r--r--  drivers/gpu/host1x/job.c | 22
-rw-r--r--  drivers/gpu/vga/vgaarb.c | 40
314 files changed, 25413 insertions(+), 7281 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5120046ff80..b066bb3ca01a 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -114,6 +114,7 @@ config DRM_RADEON
select POWER_SUPPLY
select HWMON
select BACKLIGHT_CLASS_DEVICE
+ select INTERVAL_TREE
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -201,3 +202,5 @@ source "drivers/gpu/drm/msm/Kconfig"
source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/panel/Kconfig"
+
+source "drivers/gpu/drm/sti/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index dd2ba4269740..4a55d59ccd22 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,8 +6,8 @@ ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
- drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
+ drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
+ drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
@@ -20,11 +20,12 @@ drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
+drm-$(CONFIG_OF) += drm_of.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
- drm_plane_helper.o
+ drm_plane_helper.o drm_dp_mst_topology.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/
+obj-$(CONFIG_DRM_STI) += sti/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 59948eff6095..ad3d2ebf95c9 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -15,20 +15,19 @@
#include "armada_drm.h"
#include "armada_hw.h"
-static int armada510_init(struct armada_private *priv, struct device *dev)
+static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
{
- priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+ struct clk *clk;
- if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
- priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+ clk = devm_clk_get(dev, "ext_ref_clk1");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk) == -ENOENT ? -EPROBE_DEFER : PTR_ERR(clk);
- return PTR_RET(priv->extclk[0]);
-}
+ dcrtc->extclk[0] = clk;
-static int armada510_crtc_init(struct armada_crtc *dcrtc)
-{
/* Lower the watermark so to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+
return 0;
}
@@ -45,8 +44,7 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc)
static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct drm_display_mode *mode, uint32_t *sclk)
{
- struct armada_private *priv = dcrtc->crtc.dev->dev_private;
- struct clk *clk = priv->extclk[0];
+ struct clk *clk = dcrtc->extclk[0];
int ret;
if (dcrtc->num == 1)
@@ -81,7 +79,6 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
.spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
- .init = armada510_init,
- .crtc_init = armada510_crtc_init,
- .crtc_compute_clock = armada510_crtc_compute_clock,
+ .init = armada510_crtc_init,
+ .compute_clock = armada510_crtc_compute_clock,
};
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3aedf9e993e6..9a0cc09e6653 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -7,6 +7,9 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
@@ -332,24 +335,23 @@ static void armada_drm_crtc_commit(struct drm_crtc *crtc)
static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode, struct drm_display_mode *adj)
{
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
int ret;
/* We can't do interlaced modes if we don't have the SPU_ADV_REG */
- if (!priv->variant->has_spu_adv_reg &&
+ if (!dcrtc->variant->has_spu_adv_reg &&
adj->flags & DRM_MODE_FLAG_INTERLACE)
return false;
/* Check whether the display mode is possible */
- ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+ ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
if (ret)
return false;
return true;
}
-void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
struct armada_vbl_event *e, *n;
void __iomem *base = dcrtc->base;
@@ -410,6 +412,27 @@ void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
}
}
+static irqreturn_t armada_drm_irq(int irq, void *arg)
+{
+ struct armada_crtc *dcrtc = arg;
+ u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /*
+ * This is rediculous - rather than writing bits to clear, we
+ * have to set the actual status register value. This is racy.
+ */
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /* Mask out those interrupts we haven't enabled */
+ v = stat & dcrtc->irq_ena;
+
+ if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+ armada_drm_crtc_irq(dcrtc, stat);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
/* These are locked by dev->vbl_lock */
void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
{
@@ -470,7 +493,6 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode, struct drm_display_mode *adj,
int x, int y, struct drm_framebuffer *old_fb)
{
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
@@ -515,7 +537,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
}
/* Now compute the divider for real */
- priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+ dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
/* Ensure graphic fifo is enabled */
armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
@@ -537,7 +559,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- priv->variant->spu_adv_reg;
+ dcrtc->variant->spu_adv_reg;
if (interlaced) {
/* Odd interlaced frame */
@@ -546,7 +568,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- priv->variant->spu_adv_reg;
+ dcrtc->variant->spu_adv_reg;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
@@ -561,7 +583,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
- if (priv->variant->has_spu_adv_reg) {
+ if (dcrtc->variant->has_spu_adv_reg) {
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
@@ -805,12 +827,11 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_private *priv = crtc->dev->dev_private;
struct armada_gem_object *obj = NULL;
int ret;
/* If no cursor support, replicate drm's return value */
- if (!priv->variant->has_spu_adv_reg)
+ if (!dcrtc->variant->has_spu_adv_reg)
return -ENXIO;
if (handle && w > 0 && h > 0) {
@@ -858,11 +879,10 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct drm_device *dev = crtc->dev;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_private *priv = crtc->dev->dev_private;
int ret;
/* If no cursor support, replicate drm's return value */
- if (!priv->variant->has_spu_adv_reg)
+ if (!dcrtc->variant->has_spu_adv_reg)
return -EFAULT;
mutex_lock(&dev->struct_mutex);
@@ -888,6 +908,10 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
if (!IS_ERR(dcrtc->clk))
clk_disable_unprepare(dcrtc->clk);
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+
+ of_node_put(dcrtc->crtc.port);
+
kfree(dcrtc);
}
@@ -1027,19 +1051,20 @@ static int armada_drm_crtc_create_properties(struct drm_device *dev)
return 0;
}
-int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
- struct resource *res)
+int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+ struct resource *res, int irq, const struct armada_variant *variant,
+ struct device_node *port)
{
- struct armada_private *priv = dev->dev_private;
+ struct armada_private *priv = drm->dev_private;
struct armada_crtc *dcrtc;
void __iomem *base;
int ret;
- ret = armada_drm_crtc_create_properties(dev);
+ ret = armada_drm_crtc_create_properties(drm);
if (ret)
return ret;
- base = devm_ioremap_resource(dev->dev, res);
+ base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1049,8 +1074,12 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
return -ENOMEM;
}
+ if (dev != drm->dev)
+ dev_set_drvdata(dev, dcrtc);
+
+ dcrtc->variant = variant;
dcrtc->base = base;
- dcrtc->num = num;
+ dcrtc->num = drm->mode_config.num_crtc;
dcrtc->clk = ERR_PTR(-EINVAL);
dcrtc->csc_yuv_mode = CSC_AUTO;
dcrtc->csc_rgb_mode = CSC_AUTO;
@@ -1072,9 +1101,18 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
+ dcrtc);
+ if (ret < 0) {
+ kfree(dcrtc);
+ return ret;
+ }
- if (priv->variant->crtc_init) {
- ret = priv->variant->crtc_init(dcrtc);
+ if (dcrtc->variant->init) {
+ ret = dcrtc->variant->init(dcrtc, dev);
if (ret) {
kfree(dcrtc);
return ret;
@@ -1086,7 +1124,8 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
priv->dcrtc[dcrtc->num] = dcrtc;
- drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+ dcrtc->crtc.port = port;
+ drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
@@ -1094,5 +1133,107 @@ int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
dcrtc->csc_rgb_mode);
- return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+ return armada_overlay_plane_create(drm, 1 << dcrtc->num);
+}
+
+static int
+armada_lcd_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int irq = platform_get_irq(pdev, 0);
+ const struct armada_variant *variant;
+ struct device_node *port = NULL;
+
+ if (irq < 0)
+ return irq;
+
+ if (!dev->of_node) {
+ const struct platform_device_id *id;
+
+ id = platform_get_device_id(pdev);
+ if (!id)
+ return -ENXIO;
+
+ variant = (const struct armada_variant *)id->driver_data;
+ } else {
+ const struct of_device_id *match;
+ struct device_node *np, *parent = dev->of_node;
+
+ match = of_match_device(dev->driver->of_match_table, dev);
+ if (!match)
+ return -ENXIO;
+
+ np = of_get_child_by_name(parent, "ports");
+ if (np)
+ parent = np;
+ port = of_get_child_by_name(parent, "port");
+ of_node_put(np);
+ if (!port) {
+ dev_err(dev, "no port node found in %s\n",
+ parent->full_name);
+ return -ENXIO;
+ }
+
+ variant = match->data;
+ }
+
+ return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
+}
+
+static void
+armada_lcd_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct armada_crtc *dcrtc = dev_get_drvdata(dev);
+
+ armada_drm_crtc_destroy(&dcrtc->crtc);
}
+
+static const struct component_ops armada_lcd_ops = {
+ .bind = armada_lcd_bind,
+ .unbind = armada_lcd_unbind,
+};
+
+static int armada_lcd_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &armada_lcd_ops);
+}
+
+static int armada_lcd_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &armada_lcd_ops);
+ return 0;
+}
+
+static struct of_device_id armada_lcd_of_match[] = {
+ {
+ .compatible = "marvell,dove-lcd",
+ .data = &armada510_ops,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, armada_lcd_of_match);
+
+static const struct platform_device_id armada_lcd_platform_ids[] = {
+ {
+ .name = "armada-lcd",
+ .driver_data = (unsigned long)&armada510_ops,
+ }, {
+ .name = "armada-510-lcd",
+ .driver_data = (unsigned long)&armada510_ops,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
+
+struct platform_driver armada_lcd_platform_driver = {
+ .probe = armada_lcd_probe,
+ .remove = armada_lcd_remove,
+ .driver = {
+ .name = "armada-lcd",
+ .owner = THIS_MODULE,
+ .of_match_table = armada_lcd_of_match,
+ },
+ .id_table = armada_lcd_platform_ids,
+};
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 9c10a07e7492..98102a5a9af5 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,12 +32,15 @@ struct armada_regs {
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
struct armada_frame_work;
+struct armada_variant;
struct armada_crtc {
struct drm_crtc crtc;
+ const struct armada_variant *variant;
unsigned num;
void __iomem *base;
struct clk *clk;
+ struct clk *extclk[2];
struct {
uint32_t spu_v_h_total;
uint32_t spu_v_porch;
@@ -72,12 +75,16 @@ struct armada_crtc {
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
-int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+struct device_node;
+int armada_drm_crtc_create(struct drm_device *, struct device *,
+ struct resource *, int, const struct armada_variant *,
+ struct device_node *);
void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
-void armada_drm_crtc_irq(struct armada_crtc *, u32);
void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+extern struct platform_driver armada_lcd_platform_driver;
+
#endif
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index a72cae03b99b..ea63c6c7c66f 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -59,26 +59,23 @@ void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
struct armada_private;
struct armada_variant {
- bool has_spu_adv_reg;
+ bool has_spu_adv_reg;
uint32_t spu_adv_reg;
- int (*init)(struct armada_private *, struct device *);
- int (*crtc_init)(struct armada_crtc *);
- int (*crtc_compute_clock)(struct armada_crtc *,
- const struct drm_display_mode *,
- uint32_t *);
+ int (*init)(struct armada_crtc *, struct device *);
+ int (*compute_clock)(struct armada_crtc *,
+ const struct drm_display_mode *,
+ uint32_t *);
};
/* Variant ops */
extern const struct armada_variant armada510_ops;
struct armada_private {
- const struct armada_variant *variant;
struct work_struct fb_unref_work;
DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear;
- struct clk *extclk[2];
struct drm_property *csc_yuv_prop;
struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 8ab3cd1a8cdb..e2d5792b140f 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -6,7 +6,9 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/module.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "armada_crtc.h"
@@ -52,6 +54,11 @@ static const struct armada_drm_slave_config tda19988_config = {
};
#endif
+static bool is_componentized(struct device *dev)
+{
+ return dev->of_node || dev->platform_data;
+}
+
static void armada_drm_unref_work(struct work_struct *work)
{
struct armada_private *priv =
@@ -85,6 +92,7 @@ void armada_drm_queue_unref_work(struct drm_device *dev,
static int armada_drm_load(struct drm_device *dev, unsigned long flags)
{
const struct platform_device_id *id;
+ const struct armada_variant *variant;
struct armada_private *priv;
struct resource *res[ARRAY_SIZE(priv->dcrtc)];
struct resource *mem = NULL;
@@ -107,7 +115,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
return -EINVAL;
}
- if (!res[0] || !mem)
+ if (!mem)
return -ENXIO;
if (!devm_request_mem_region(dev->dev, mem->start,
@@ -128,11 +136,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
if (!id)
return -ENXIO;
- priv->variant = (struct armada_variant *)id->driver_data;
-
- ret = priv->variant->init(priv, dev->dev);
- if (ret)
- return ret;
+ variant = (const struct armada_variant *)id->driver_data;
INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
INIT_KFIFO(priv->fb_unref);
@@ -155,40 +159,50 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
/* Create all LCD controllers */
for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+ int irq;
+
if (!res[n])
break;
- ret = armada_drm_crtc_create(dev, n, res[n]);
+ irq = platform_get_irq(dev->platformdev, n);
+ if (irq < 0)
+ goto err_kms;
+
+ ret = armada_drm_crtc_create(dev, dev->dev, res[n], irq,
+ variant, NULL);
if (ret)
goto err_kms;
}
+ if (is_componentized(dev->dev)) {
+ ret = component_bind_all(dev->dev, dev);
+ if (ret)
+ goto err_kms;
+ } else {
#ifdef CONFIG_DRM_ARMADA_TDA1998X
- ret = armada_drm_connector_slave_create(dev, &tda19988_config);
- if (ret)
- goto err_kms;
+ ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+ if (ret)
+ goto err_kms;
#endif
+ }
- ret = drm_vblank_init(dev, n);
- if (ret)
- goto err_kms;
-
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret)
- goto err_kms;
+ goto err_comp;
dev->vblank_disable_allowed = 1;
ret = armada_fbdev_init(dev);
if (ret)
- goto err_irq;
+ goto err_comp;
drm_kms_helper_poll_init(dev);
return 0;
- err_irq:
- drm_irq_uninstall(dev);
+ err_comp:
+ if (is_componentized(dev->dev))
+ component_unbind_all(dev->dev, dev);
err_kms:
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
@@ -203,7 +217,10 @@ static int armada_drm_unload(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
armada_fbdev_fini(dev);
- drm_irq_uninstall(dev);
+
+ if (is_componentized(dev->dev))
+ component_unbind_all(dev->dev, dev);
+
drm_mode_config_cleanup(dev);
drm_mm_takedown(&priv->linear);
flush_work(&priv->fb_unref_work);
@@ -259,52 +276,6 @@ static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
}
-static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = arg;
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
- uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
- irqreturn_t handled = IRQ_NONE;
-
- /*
- * This is rediculous - rather than writing bits to clear, we
- * have to set the actual status register value. This is racy.
- */
- writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
-
- /* Mask out those interrupts we haven't enabled */
- v = stat & dcrtc->irq_ena;
-
- if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
- armada_drm_crtc_irq(dcrtc, stat);
- handled = IRQ_HANDLED;
- }
-
- return handled;
-}
-
-static int armada_drm_irq_postinstall(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
-
- spin_lock_irq(&dev->vbl_lock);
- writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
- writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
- spin_unlock_irq(&dev->vbl_lock);
-
- return 0;
-}
-
-static void armada_drm_irq_uninstall(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
- struct armada_crtc *dcrtc = priv->dcrtc[0];
-
- writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
-}
-
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
DRM_UNLOCKED),
@@ -340,9 +311,6 @@ static struct drm_driver armada_drm_driver = {
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
.disable_vblank = armada_drm_disable_vblank,
- .irq_handler = armada_drm_irq_handler,
- .irq_postinstall = armada_drm_irq_postinstall,
- .irq_uninstall = armada_drm_irq_uninstall,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = armada_drm_debugfs_init,
.debugfs_cleanup = armada_drm_debugfs_cleanup,
@@ -362,19 +330,140 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_HAVE_IRQ | DRIVER_PRIME,
+ DRIVER_PRIME,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
+static int armada_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&armada_drm_driver, to_platform_device(dev));
+}
+
+static void armada_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int compare_dev_name(struct device *dev, void *data)
+{
+ const char *name = data;
+ return !strcmp(dev_name(dev), name);
+}
+
+static void armada_add_endpoints(struct device *dev,
+ struct component_match **match, struct device_node *port)
+{
+ struct device_node *ep, *remote;
+
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent device of %s is not available\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(dev, match, compare_of, remote);
+ of_node_put(remote);
+ }
+}
+
+static int armada_drm_find_components(struct device *dev,
+ struct component_match **match)
+{
+ struct device_node *port;
+ int i;
+
+ if (dev->of_node) {
+ struct device_node *np = dev->of_node;
+
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ component_match_add(dev, match, compare_of, port);
+ of_node_put(port);
+ }
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; ; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ armada_add_endpoints(dev, match, port);
+ of_node_put(port);
+ }
+ } else if (dev->platform_data) {
+ char **devices = dev->platform_data;
+ struct device *d;
+
+ for (i = 0; devices[i]; i++)
+ component_match_add(dev, match, compare_dev_name,
+ devices[i]);
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; devices[i]; i++) {
+ d = bus_find_device_by_name(&platform_bus_type, NULL,
+ devices[i]);
+ if (d && d->of_node) {
+ for_each_child_of_node(d->of_node, port)
+ armada_add_endpoints(dev, match, port);
+ }
+ put_device(d);
+ }
+ }
+
+ return 0;
+}
+
+static const struct component_master_ops armada_master_ops = {
+ .bind = armada_drm_bind,
+ .unbind = armada_drm_unbind,
+};
+
static int armada_drm_probe(struct platform_device *pdev)
{
- return drm_platform_init(&armada_drm_driver, pdev);
+ if (is_componentized(&pdev->dev)) {
+ struct component_match *match = NULL;
+ int ret;
+
+ ret = armada_drm_find_components(&pdev->dev, &match);
+ if (ret < 0)
+ return ret;
+
+ return component_master_add_with_match(&pdev->dev,
+ &armada_master_ops, match);
+ } else {
+ return drm_platform_init(&armada_drm_driver, pdev);
+ }
}
static int armada_drm_remove(struct platform_device *pdev)
{
- drm_put_dev(platform_get_drvdata(pdev));
+ if (is_componentized(&pdev->dev))
+ component_master_del(&pdev->dev, &armada_master_ops);
+ else
+ drm_put_dev(platform_get_drvdata(pdev));
return 0;
}
@@ -402,14 +491,24 @@ static struct platform_driver armada_drm_platform_driver = {
static int __init armada_drm_init(void)
{
+ int ret;
+
armada_drm_driver.num_ioctls = ARRAY_SIZE(armada_ioctls);
- return platform_driver_register(&armada_drm_platform_driver);
+
+ ret = platform_driver_register(&armada_lcd_platform_driver);
+ if (ret)
+ return ret;
+ ret = platform_driver_register(&armada_drm_platform_driver);
+ if (ret)
+ platform_driver_unregister(&armada_lcd_platform_driver);
+ return ret;
}
module_init(armada_drm_init);
static void __exit armada_drm_exit(void)
{
platform_driver_unregister(&armada_drm_platform_driver);
+ platform_driver_unregister(&armada_lcd_platform_driver);
}
module_exit(armada_drm_exit);
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index fd166f532ab9..7838e731b0de 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -131,7 +131,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
return ret;
}
-static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
.gamma_set = armada_drm_crtc_gamma_set,
.gamma_get = armada_drm_crtc_gamma_get,
.fb_probe = armada_fb_probe,
@@ -149,7 +149,7 @@ int armada_fbdev_init(struct drm_device *dev)
priv->fbdev = fbh;
- fbh->funcs = &armada_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
ret = drm_fb_helper_init(dev, fbh, 1, 1);
if (ret) {
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
index d685a5421485..abbc309fe539 100644
--- a/drivers/gpu/drm/armada/armada_output.c
+++ b/drivers/gpu/drm/armada/armada_output.c
@@ -48,7 +48,7 @@ static void armada_drm_connector_destroy(struct drm_connector *conn)
{
struct armada_connector *dconn = drm_to_armada_conn(conn);
- drm_sysfs_connector_remove(conn);
+ drm_connector_unregister(conn);
drm_connector_cleanup(conn);
kfree(dconn);
}
@@ -141,7 +141,7 @@ int armada_output_create(struct drm_device *dev,
if (ret)
goto err_conn;
- ret = drm_sysfs_connector_add(&dconn->conn);
+ ret = drm_connector_register(&dconn->conn);
if (ret)
goto err_sysfs;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5d6a87573c33..957d4fabf1e1 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -362,7 +362,7 @@ static inline int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index a28640f47c27..cba45c774552 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -287,7 +287,7 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = ast_crtc->lut_b[regno] << 8;
}
-static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
.gamma_set = ast_fb_gamma_set,
.gamma_get = ast_fb_gamma_get,
.fb_probe = astfb_create,
@@ -328,8 +328,10 @@ int ast_fbdev_init(struct drm_device *dev)
return -ENOMEM;
ast->fbdev = afbdev;
- afbdev->helper.funcs = &ast_fb_helper_funcs;
spin_lock_init(&afbdev->dirty_lock);
+
+ drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
+
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
if (ret) {
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 114aee941d46..5389350244f2 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -667,17 +667,9 @@ static void ast_encoder_destroy(struct drm_encoder *encoder)
static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
@@ -829,7 +821,7 @@ static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
ast_i2c_destroy(ast_connector->i2c);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -871,7 +863,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 9c13df29fd20..f5e0ead974a6 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -97,6 +97,7 @@ static struct drm_driver bochs_driver = {
/* ---------------------------------------------------------------------- */
/* pm interface */
+#ifdef CONFIG_PM_SLEEP
static int bochs_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -131,6 +132,7 @@ static int bochs_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
return 0;
}
+#endif
static const struct dev_pm_ops bochs_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend,
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index 561b84474122..fe95d31cd110 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -72,7 +72,7 @@ static int bochsfb_create(struct drm_fb_helper *helper,
bo = gem_to_bochs_bo(gobj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret)
return ret;
@@ -179,7 +179,7 @@ void bochs_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
*blue = regno;
}
-static struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs bochs_fb_helper_funcs = {
.gamma_set = bochs_fb_gamma_set,
.gamma_get = bochs_fb_gamma_get,
.fb_probe = bochsfb_create,
@@ -189,7 +189,8 @@ int bochs_fbdev_init(struct bochs_device *bochs)
{
int ret;
- bochs->fb.helper.funcs = &bochs_fb_helper_funcs;
+ drm_fb_helper_prepare(bochs->dev, &bochs->fb.helper,
+ &bochs_fb_helper_funcs);
ret = drm_fb_helper_init(bochs->dev, &bochs->fb.helper,
1, 1);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index dcf2e55f4ae9..9d7346b92653 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -53,7 +53,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
bochs_fb = to_bochs_framebuffer(old_fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret) {
DRM_ERROR("failed to reserve old_fb bo\n");
} else {
@@ -67,7 +67,7 @@ static int bochs_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
bochs_fb = to_bochs_framebuffer(crtc->primary->fb);
bo = gem_to_bochs_bo(bochs_fb->obj);
- ret = ttm_bo_reserve(&bo->bo, true, false, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, false, false, NULL);
if (ret)
return ret;
@@ -216,18 +216,9 @@ static struct drm_encoder *
bochs_connector_best_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index b9a695d92792..1728a1b0b813 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -387,7 +387,7 @@ int bochs_gem_create(struct drm_device *dev, u32 size, bool iskernel,
*obj = NULL;
- size = ALIGN(size, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
if (size == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 98fd17ae4916..d466696ed5e8 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -328,7 +328,7 @@ int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
}
drm_connector_helper_add(&ptn_bridge->connector,
&ptn3460_connector_helper_funcs);
- drm_sysfs_connector_add(&ptn_bridge->connector);
+ drm_connector_register(&ptn_bridge->connector);
drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 08ce520f61a5..4516b052cc67 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -76,6 +76,7 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
+#ifdef CONFIG_PM_SLEEP
static int cirrus_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -110,6 +111,7 @@ static int cirrus_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
return 0;
}
+#endif
static const struct file_operations cirrus_driver_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 117d3eca5e37..401c890b6c6a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -241,7 +241,7 @@ static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 32bbba0a787b..2a135f253e29 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -288,7 +288,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
return 0;
}
-static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
.gamma_set = cirrus_crtc_fb_gamma_set,
.gamma_get = cirrus_crtc_fb_gamma_get,
.fb_probe = cirrusfb_create,
@@ -306,9 +306,11 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
return -ENOMEM;
cdev->mode_info.gfbdev = gfbdev;
- gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
spin_lock_init(&gfbdev->dirty_lock);
+ drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
+ &cirrus_fb_helper_funcs);
+
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
if (ret) {
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 49332c5fe35b..e1c5c3222129 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -509,19 +509,9 @@ static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj =
- drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
index 0406110f83ed..86a4a4a60afc 100644
--- a/drivers/gpu/drm/drm_buffer.c
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -80,11 +80,7 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)
error_out:
- /* Only last element can be null pointer so check for it first. */
- if ((*buf)->data[idx])
- kfree((*buf)->data[idx]);
-
- for (--idx; idx >= 0; --idx)
+ for (; idx >= 0; --idx)
kfree((*buf)->data[idx]);
kfree(*buf);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 68175b54504b..61acb8f6756d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -1217,7 +1217,6 @@ int drm_infobufs(struct drm_device *dev, void *data,
struct drm_buf_desc __user *to =
&request->list[count];
struct drm_buf_entry *from = &dma->bufs[i];
- struct drm_freelist *list = &dma->bufs[i].freelist;
if (copy_to_user(&to->count,
&from->buf_count,
sizeof(from->buf_count)) ||
@@ -1225,19 +1224,19 @@ int drm_infobufs(struct drm_device *dev, void *data,
&from->buf_size,
sizeof(from->buf_size)) ||
copy_to_user(&to->low_mark,
- &list->low_mark,
- sizeof(list->low_mark)) ||
+ &from->low_mark,
+ sizeof(from->low_mark)) ||
copy_to_user(&to->high_mark,
- &list->high_mark,
- sizeof(list->high_mark)))
+ &from->high_mark,
+ sizeof(from->high_mark)))
return -EFAULT;
DRM_DEBUG("%d %d %d %d %d\n",
i,
dma->bufs[i].buf_count,
dma->bufs[i].buf_size,
- dma->bufs[i].freelist.low_mark,
- dma->bufs[i].freelist.high_mark);
+ dma->bufs[i].low_mark,
+ dma->bufs[i].high_mark);
++count;
}
}
@@ -1290,8 +1289,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
if (request->high_mark < 0 || request->high_mark > entry->buf_count)
return -EINVAL;
- entry->freelist.low_mark = request->low_mark;
- entry->freelist.high_mark = request->high_mark;
+ entry->low_mark = request->low_mark;
+ entry->high_mark = request->high_mark;
return 0;
}
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index a4b017b6849e..9b23525c0ed0 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -1,18 +1,13 @@
-/**
- * \file drm_context.c
- * IOCTLs for generic contexts
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ * Legacy: Generic DRM Contexts
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -33,14 +28,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/*
- * ChangeLog:
- * 2001-11-16 Torsten Duwe <duwe@caldera.de>
- * added context constructor/destructor hooks,
- * needed by SiS driver's memory management.
- */
-
#include <drm/drmP.h>
+#include "drm_legacy.h"
+
+struct drm_ctx_list {
+ struct list_head head;
+ drm_context_t handle;
+ struct drm_file *tag;
+};
/******************************************************************/
/** \name Context bitmap support */
@@ -56,7 +51,7 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
@@ -72,7 +67,7 @@ void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
-static int drm_ctxbitmap_next(struct drm_device * dev)
+static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
{
int ret;
@@ -90,7 +85,7 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-int drm_ctxbitmap_init(struct drm_device * dev)
+int drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
idr_init(&dev->ctx_idr);
return 0;
@@ -104,13 +99,43 @@ int drm_ctxbitmap_init(struct drm_device * dev)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
+/**
+ * drm_ctxbitmap_flush() - Flush all contexts owned by a file
+ * @dev: DRM device to operate on
+ * @file: Open file to flush contexts for
+ *
+ * This iterates over all contexts on @dev and drops them if they're owned by
+ * @file. Note that after this call returns, new contexts might be added if
+ * the file is still alive.
+ */
+void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_ctx_list *pos, *tmp;
+
+ mutex_lock(&dev->ctxlist_mutex);
+
+ list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
+ if (pos->tag == file &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev, pos->handle);
+
+ drm_legacy_ctxbitmap_free(dev, pos->handle);
+ list_del(&pos->head);
+ kfree(pos);
+ }
+ }
+
+ mutex_unlock(&dev->ctxlist_mutex);
+}
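
Editor's note: this flush helper is intended to be called from the DRM core's
file-release path so that an exiting or crashed client cannot leak contexts.
A minimal sketch, assuming a caller that already has the drm_device/drm_file
pair in hand (the real in-tree call site lives outside this hunk):

    /* Sketch: the context-teardown step of a file-close path. */
    static void example_file_release(struct drm_device *dev,
                                     struct drm_file *file_priv)
    {
            /* Drop every legacy context this file still owns. */
            drm_legacy_ctxbitmap_flush(dev, file_priv);
    }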
+
/*@}*/
/******************************************************************/
@@ -129,8 +154,8 @@ void drm_ctxbitmap_cleanup(struct drm_device * dev)
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
-int drm_getsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map;
@@ -173,8 +198,8 @@ int drm_getsareactx(struct drm_device *dev, void *data,
* Searches the mapping specified in \p arg and update the entry in
* drm_device::ctx_idr with it.
*/
-int drm_setsareactx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_priv_map *request = data;
struct drm_local_map *map = NULL;
@@ -273,8 +298,8 @@ static int drm_context_switch_complete(struct drm_device *dev,
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
-int drm_resctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_resctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_res *res = data;
struct drm_ctx ctx;
@@ -304,16 +329,16 @@ int drm_resctx(struct drm_device *dev, void *data,
*
* Get a new handle for the context and copy to userspace.
*/
-int drm_addctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_addctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
- ctx->handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_legacy_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
- ctx->handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle == -1) {
@@ -348,7 +373,8 @@ int drm_addctx(struct drm_device *dev, void *data,
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
-int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_legacy_getctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -369,8 +395,8 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
*
* Calls context_switch().
*/
-int drm_switchctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -389,8 +415,8 @@ int drm_switchctx(struct drm_device *dev, void *data,
*
* Calls context_switch_complete().
*/
-int drm_newctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_newctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -411,8 +437,8 @@ int drm_newctx(struct drm_device *dev, void *data,
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
-int drm_rmctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_rmctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
@@ -420,7 +446,7 @@ int drm_rmctx(struct drm_device *dev, void *data,
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev, ctx->handle);
- drm_ctxbitmap_free(dev, ctx->handle);
+ drm_legacy_ctxbitmap_free(dev, ctx->handle);
}
mutex_lock(&dev->ctxlist_mutex);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fe94cc10cd35..fa2be249999c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -41,6 +41,10 @@
#include "drm_crtc_internal.h"
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv);
+
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
@@ -178,6 +182,12 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
+static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
+ { DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
+ { DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
+ { DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
+};
+
/*
* Non-global properties, but "required" for certain connectors.
*/
@@ -357,6 +367,32 @@ const char *drm_get_format_name(uint32_t format)
}
EXPORT_SYMBOL(drm_get_format_name);
+/*
+ * Internal function to assign a slot in the object idr and optionally
+ * register the object into the idr.
+ */
+static int drm_mode_object_get_reg(struct drm_device *dev,
+ struct drm_mode_object *obj,
+ uint32_t obj_type,
+ bool register_obj)
+{
+ int ret;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ /*
+ * Set up the object linking under the protection of the idr
+ * lock so that other users can't see inconsistent state.
+ */
+ obj->id = ret;
+ obj->type = obj_type;
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+
/**
* drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
@@ -375,21 +411,15 @@ EXPORT_SYMBOL(drm_get_format_name);
int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
- int ret;
+ return drm_mode_object_get_reg(dev, obj, obj_type, true);
+}
+static void drm_mode_object_register(struct drm_device *dev,
+ struct drm_mode_object *obj)
+{
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
- if (ret >= 0) {
- /*
- * Set up the object linking under the protection of the idr
- * lock so that other users can't see inconsistent state.
- */
- obj->id = ret;
- obj->type = obj_type;
- }
+ idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
mutex_unlock(&dev->mode_config.idr_mutex);
-
- return ret < 0 ? ret : 0;
}
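
Editor's note: the point of splitting allocation from registration is that an
object can reserve an ID early while staying invisible to userspace lookups
until it is fully constructed. A minimal sketch of the pattern, mirroring what
drm_connector_init() below now does (error handling abbreviated):

    int ret;

    /* Reserve an ID but leave the idr slot NULL so lookups fail. */
    ret = drm_mode_object_get_reg(dev, &connector->base,
                                  DRM_MODE_OBJECT_CONNECTOR, false);
    if (ret)
            return ret;

    /* ... finish constructing the object ... */

    /* Publish: swap the live object into the reserved slot. */
    drm_mode_object_register(dev, &connector->base);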
/**
@@ -416,8 +446,12 @@ static struct drm_mode_object *_object_find(struct drm_device *dev,
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
- if (!obj || (type != DRM_MODE_OBJECT_ANY && obj->type != type) ||
- (obj->id != id))
+ if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
+ obj = NULL;
+ if (obj && obj->id != id)
+ obj = NULL;
+ /* don't leak out unref'd fb's */
+ if (obj && (obj->type == DRM_MODE_OBJECT_FB))
obj = NULL;
mutex_unlock(&dev->mode_config.idr_mutex);
@@ -444,9 +478,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
obj = _object_find(dev, id, type);
- /* don't leak out unref'd fb's */
- if (obj && (obj->type == DRM_MODE_OBJECT_FB))
- obj = NULL;
return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);
@@ -723,7 +754,7 @@ DEFINE_WW_CLASS(crtc_ww_class);
*/
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary,
- void *cursor,
+ struct drm_plane *cursor,
const struct drm_crtc_funcs *funcs)
{
struct drm_mode_config *config = &dev->mode_config;
@@ -748,8 +779,11 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
config->num_crtc++;
crtc->primary = primary;
+ crtc->cursor = cursor;
if (primary)
primary->possible_crtcs = 1 << drm_crtc_index(crtc);
+ if (cursor)
+ cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
out:
drm_modeset_unlock_all(dev);
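
Editor's note: with the cursor argument now properly typed, a driver with
universal plane support wires both planes into its CRTC roughly like this
(a sketch; the foo_* names are hypothetical driver helpers):

    struct drm_plane *primary, *cursor;
    int ret;

    primary = foo_create_primary_plane(dev);    /* hypothetical */
    cursor = foo_create_cursor_plane(dev);      /* hypothetical */

    ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                    &foo_crtc_funcs);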
@@ -842,7 +876,7 @@ int drm_connector_init(struct drm_device *dev,
drm_modeset_lock_all(dev);
- ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
+ ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
if (ret)
goto out_unlock;
@@ -881,6 +915,8 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
dev->mode_config.dpms_property, 0);
+ connector->debugfs_entry = NULL;
+
out_put:
if (ret)
drm_mode_object_put(dev, &connector->base);
@@ -921,6 +957,49 @@ void drm_connector_cleanup(struct drm_connector *connector)
EXPORT_SYMBOL(drm_connector_cleanup);
/**
+ * drm_connector_register - register a connector
+ * @connector: the connector to register
+ *
+ * Register userspace interfaces for a connector
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_register(struct drm_connector *connector)
+{
+ int ret;
+
+ drm_mode_object_register(connector->dev, &connector->base);
+
+ ret = drm_sysfs_connector_add(connector);
+ if (ret)
+ return ret;
+
+ ret = drm_debugfs_connector_add(connector);
+ if (ret) {
+ drm_sysfs_connector_remove(connector);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_register);
+
+/**
+ * drm_connector_unregister - unregister a connector
+ * @connector: the connector to unregister
+ *
+ * Unregister userspace interfaces for a connector
+ */
+void drm_connector_unregister(struct drm_connector *connector)
+{
+ drm_sysfs_connector_remove(connector);
+ drm_debugfs_connector_remove(connector);
+}
+EXPORT_SYMBOL(drm_connector_unregister);
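
Editor's note: drivers call drm_connector_register() once a connector is fully
set up, and drm_connector_unregister() before tearing it down; for
hotpluggable connectors (e.g. DP MST) this happens at runtime rather than at
driver load. A minimal sketch (foo_connector_funcs is a hypothetical driver
vtable):

    ret = drm_connector_init(dev, connector, &foo_connector_funcs,
                             DRM_MODE_CONNECTOR_DisplayPort);
    if (ret)
            return ret;

    ret = drm_connector_register(connector);
    if (ret) {
            drm_connector_cleanup(connector);
            return ret;
    }

    /* ... and on teardown, in reverse order: */
    drm_connector_unregister(connector);
    drm_connector_cleanup(connector);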
+
+
+/**
* drm_connector_unplug_all - unregister connector userspace interfaces
* @dev: drm device
*
@@ -934,7 +1013,7 @@ void drm_connector_unplug_all(struct drm_device *dev)
/* taking the mode config mutex ends up in a clash with sysfs */
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
}
EXPORT_SYMBOL(drm_connector_unplug_all);
@@ -1214,6 +1293,7 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
{
struct drm_property *edid;
struct drm_property *dpms;
+ struct drm_property *dev_path;
/*
* Standard properties (apply to all connectors)
@@ -1228,6 +1308,12 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
+ dev_path = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "PATH", 0);
+ dev->mode_config.path_property = dev_path;
+
return 0;
}
@@ -1384,6 +1470,33 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
+ * drm_mode_create_aspect_ratio_property - create aspect ratio property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
+{
+ if (dev->mode_config.aspect_ratio_property)
+ return 0;
+
+ dev->mode_config.aspect_ratio_property =
+ drm_property_create_enum(dev, 0, "aspect ratio",
+ drm_aspect_ratio_enum_list,
+ ARRAY_SIZE(drm_aspect_ratio_enum_list));
+
+ if (dev->mode_config.aspect_ratio_property == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
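
Editor's note: a driver using this creates the property once and attaches it
to each connector that honours it. A sketch; the initial value
(DRM_MODE_PICTURE_ASPECT_NONE) is an assumption, not mandated by the helper:

    int ret = drm_mode_create_aspect_ratio_property(dev);
    if (ret)
            return ret;

    drm_object_attach_property(&connector->base,
                               dev->mode_config.aspect_ratio_property,
                               DRM_MODE_PICTURE_ASPECT_NONE);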
+
+/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
*
@@ -1470,6 +1583,15 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
+void drm_reinit_primary_mode_group(struct drm_device *dev)
+{
+ drm_modeset_lock_all(dev);
+ drm_mode_group_destroy(&dev->primary->mode_group);
+ drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
+ drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_reinit_primary_mode_group);
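
Editor's note: drm_reinit_primary_mode_group() exists for configurations where
connectors come and go at runtime (DP MST being the motivating case in this
series); the legacy mode group caches the object list exposed to old
userspace, so it must be rebuilt after topology changes. A hedged sketch of a
hotplug handler:

    /* after MST connectors were added or removed: */
    drm_reinit_primary_mode_group(dev);
    drm_kms_helper_hotplug_event(dev);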
+
/**
* drm_crtc_convert_to_umode - convert a drm_display_mode into a modeinfo
* @out: drm_mode_modeinfo struct to return to the user
@@ -2118,45 +2240,32 @@ out:
return ret;
}
-/**
- * drm_mode_setplane - configure a plane's configuration
- * @dev: DRM device
- * @data: ioctl data*
- * @file_priv: DRM file info
+/*
+ * setplane_internal - setplane handler for internal callers
*
- * Set plane configuration, including placement, fb, scaling, and other factors.
- * Or pass a NULL fb to disable.
+ * Note that we assume an extra reference has already been taken on fb. If the
+ * update fails, this reference will be dropped before return; if it succeeds,
+ * the previous framebuffer (if any) will be unreferenced instead.
*
- * Returns:
- * Zero on success, errno on failure.
+ * src_{x,y,w,h} are provided in 16.16 fixed point format
*/
-int drm_mode_setplane(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int setplane_internal(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int32_t crtc_x, int32_t crtc_y,
+ uint32_t crtc_w, uint32_t crtc_h,
+ /* src_{x,y,w,h} values are 16.16 fixed point */
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
{
- struct drm_mode_set_plane *plane_req = data;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+ struct drm_device *dev = plane->dev;
+ struct drm_framebuffer *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- /*
- * First, find the plane, crtc, and fb objects. If not available,
- * we don't bother to call the driver.
- */
- plane = drm_plane_find(dev, plane_req->plane_id);
- if (!plane) {
- DRM_DEBUG_KMS("Unknown plane ID %d\n",
- plane_req->plane_id);
- return -ENOENT;
- }
-
/* No fb means shut it down */
- if (!plane_req->fb_id) {
+ if (!fb) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->disable_plane(plane);
@@ -2170,14 +2279,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
- crtc = drm_crtc_find(dev, plane_req->crtc_id);
- if (!crtc) {
- DRM_DEBUG_KMS("Unknown crtc ID %d\n",
- plane_req->crtc_id);
- ret = -ENOENT;
- goto out;
- }
-
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_KMS("Invalid crtc for plane\n");
@@ -2185,14 +2286,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
goto out;
}
- fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
- if (!fb) {
- DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
- plane_req->fb_id);
- ret = -ENOENT;
- goto out;
- }
-
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (fb->pixel_format == plane->format_types[i])
@@ -2208,43 +2301,25 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
- if (plane_req->src_w > fb_width ||
- plane_req->src_x > fb_width - plane_req->src_w ||
- plane_req->src_h > fb_height ||
- plane_req->src_y > fb_height - plane_req->src_h) {
+ if (src_w > fb_width ||
+ src_x > fb_width - src_w ||
+ src_h > fb_height ||
+ src_y > fb_height - src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
- plane_req->src_w >> 16,
- ((plane_req->src_w & 0xffff) * 15625) >> 10,
- plane_req->src_h >> 16,
- ((plane_req->src_h & 0xffff) * 15625) >> 10,
- plane_req->src_x >> 16,
- ((plane_req->src_x & 0xffff) * 15625) >> 10,
- plane_req->src_y >> 16,
- ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ src_w >> 16, ((src_w & 0xffff) * 15625) >> 10,
+ src_h >> 16, ((src_h & 0xffff) * 15625) >> 10,
+ src_x >> 16, ((src_x & 0xffff) * 15625) >> 10,
+ src_y >> 16, ((src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
goto out;
}
- /* Give drivers some help against integer overflows */
- if (plane_req->crtc_w > INT_MAX ||
- plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
- plane_req->crtc_h > INT_MAX ||
- plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
- DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->crtc_x, plane_req->crtc_y);
- ret = -ERANGE;
- goto out;
- }
-
drm_modeset_lock_all(dev);
old_fb = plane->fb;
ret = plane->funcs->update_plane(plane, crtc, fb,
- plane_req->crtc_x, plane_req->crtc_y,
- plane_req->crtc_w, plane_req->crtc_h,
- plane_req->src_x, plane_req->src_y,
- plane_req->src_w, plane_req->src_h);
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
@@ -2261,6 +2336,85 @@ out:
drm_framebuffer_unreference(old_fb);
return ret;
+}
+
+/**
+ * drm_mode_setplane - configure a plane's configuration
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane configuration, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable (planes may be disabled without providing a
+ * valid crtc).
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc = NULL;
+ struct drm_framebuffer *fb = NULL;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ return -ERANGE;
+ }
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ return -ENOENT;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane_req->fb_id) {
+ fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
+ if (!fb) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ return -ENOENT;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ return -ENOENT;
+ }
+ crtc = obj_to_crtc(obj);
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ return setplane_internal(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
}
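
Editor's note: for reference, the src_* fields of the ioctl request are 16.16
fixed point. A userspace caller scanning out a full w x h framebuffer through
libdrm would fill the request roughly like this (a sketch; error handling
omitted):

    struct drm_mode_set_plane req = {
            .plane_id = plane_id,
            .crtc_id = crtc_id,
            .fb_id = fb_id,
            .crtc_x = 0, .crtc_y = 0,
            .crtc_w = w, .crtc_h = h,
            .src_x = 0, .src_y = 0,
            .src_w = w << 16,       /* 16.16 fixed point */
            .src_h = h << 16,
    };

    drmIoctl(fd, DRM_IOCTL_MODE_SETPLANE, &req);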
/**
@@ -2509,6 +2663,102 @@ out:
return ret;
}
+/**
+ * drm_mode_cursor_universal - translate legacy cursor ioctl call into a
+ * universal plane handler call
+ * @crtc: crtc to update cursor for
+ * @req: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Legacy cursor ioctls work directly with driver buffer handles. To
+ * translate legacy ioctl calls into universal plane handler calls, we need to
+ * wrap the native buffer handle in a drm_framebuffer.
+ *
+ * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB
+ * buffer with a pitch of 4*width; the universal plane interface should be used
+ * directly in cases where the hardware can support other buffer settings and
+ * userspace wants to make use of these capabilities.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+static int drm_mode_cursor_universal(struct drm_crtc *crtc,
+ struct drm_mode_cursor2 *req,
+ struct drm_file *file_priv)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_mode_fb_cmd2 fbreq = {
+ .width = req->width,
+ .height = req->height,
+ .pixel_format = DRM_FORMAT_ARGB8888,
+ .pitches = { req->width * 4 },
+ .handles = { req->handle },
+ };
+ int32_t crtc_x, crtc_y;
+ uint32_t crtc_w = 0, crtc_h = 0;
+ uint32_t src_w = 0, src_h = 0;
+ int ret = 0;
+
+ BUG_ON(!crtc->cursor);
+
+ /*
+ * Obtain fb we'll be using (either new or existing) and take an extra
+ * reference to it if fb != null. setplane will take care of dropping
+ * the reference if the plane update fails.
+ */
+ if (req->flags & DRM_MODE_CURSOR_BO) {
+ if (req->handle) {
+ fb = add_framebuffer_internal(dev, &fbreq, file_priv);
+ if (IS_ERR(fb)) {
+ DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
+ return PTR_ERR(fb);
+ }
+
+ drm_framebuffer_reference(fb);
+ } else {
+ fb = NULL;
+ }
+ } else {
+ mutex_lock(&dev->mode_config.mutex);
+ fb = crtc->cursor->fb;
+ if (fb)
+ drm_framebuffer_reference(fb);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+
+ if (req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc_x = req->x;
+ crtc_y = req->y;
+ } else {
+ crtc_x = crtc->cursor_x;
+ crtc_y = crtc->cursor_y;
+ }
+
+ if (fb) {
+ crtc_w = fb->width;
+ crtc_h = fb->height;
+ src_w = fb->width << 16;
+ src_h = fb->height << 16;
+ }
+
+ /*
+ * setplane_internal will take care of deref'ing either the old or new
+ * framebuffer depending on success.
+ */
+ ret = setplane_internal(crtc->cursor, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ 0, 0, src_w, src_h);
+
+ /* Update successful; save new cursor position, if necessary */
+ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+ crtc->cursor_x = req->x;
+ crtc->cursor_y = req->y;
+ }
+
+ return ret;
+}
+
static int drm_mode_cursor_common(struct drm_device *dev,
struct drm_mode_cursor2 *req,
struct drm_file *file_priv)
@@ -2528,6 +2778,13 @@ static int drm_mode_cursor_common(struct drm_device *dev,
return -ENOENT;
}
+ /*
+ * If this crtc has a universal cursor plane, call that plane's update
+ * handler rather than using legacy cursor handlers.
+ */
+ if (crtc->cursor)
+ return drm_mode_cursor_universal(crtc, req, file_priv);
+
drm_modeset_lock(&crtc->mutex, NULL);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
@@ -2827,56 +3084,38 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
return 0;
}
-/**
- * drm_mode_addfb2 - add an FB to the graphics configuration
- * @dev: drm device for the ioctl
- * @data: data pointer for the ioctl
- * @file_priv: drm file for the ioctl call
- *
- * Add a new FB to the specified CRTC, given a user request with format. This is
- * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
- * and uses fourcc codes as pixel format specifiers.
- *
- * Called by the user via ioctl.
- *
- * Returns:
- * Zero on success, errno on failure.
- */
-int drm_mode_addfb2(struct drm_device *dev,
- void *data, struct drm_file *file_priv)
+static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *r,
+ struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
if (r->flags & ~DRM_MODE_FB_INTERLACED) {
DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
ret = framebuffer_check(r);
if (ret)
- return ret;
+ return ERR_PTR(ret);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
- return PTR_ERR(fb);
+ return fb;
}
mutex_lock(&file_priv->fbs_lock);
@@ -2885,8 +3124,37 @@ int drm_mode_addfb2(struct drm_device *dev,
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
+ return fb;
+}
- return ret;
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Add a new FB to the specified CRTC, given a user request with format. This is
+ * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers
+ * and uses fourcc codes as pixel format specifiers.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fb;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = add_framebuffer_internal(dev, data, file_priv);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+
+ return 0;
}
/**
@@ -3176,7 +3444,7 @@ fail:
EXPORT_SYMBOL(drm_property_create);
/**
- * drm_property_create - create a new enumeration property type
+ * drm_property_create_enum - create a new enumeration property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3222,7 +3490,7 @@ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
EXPORT_SYMBOL(drm_property_create_enum);
/**
- * drm_property_create - create a new bitmask property type
+ * drm_property_create_bitmask - create a new bitmask property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3242,19 +3510,28 @@ EXPORT_SYMBOL(drm_property_create_enum);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
- int num_values)
+ int num_props,
+ uint64_t supported_bits)
{
struct drm_property *property;
- int i, ret;
+ int i, ret, index = 0;
+ int num_values = hweight64(supported_bits);
flags |= DRM_MODE_PROP_BITMASK;
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
+ for (i = 0; i < num_props; i++) {
+ if (!(supported_bits & (1ULL << props[i].type)))
+ continue;
- for (i = 0; i < num_values; i++) {
- ret = drm_property_add_enum(property, i,
+ if (WARN_ON(index >= num_values)) {
+ drm_property_destroy(dev, property);
+ return NULL;
+ }
+
+ ret = drm_property_add_enum(property, index++,
props[i].type,
props[i].name);
if (ret) {
@@ -3284,7 +3561,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
}
/**
- * drm_property_create - create a new ranged property type
+ * drm_property_create_range - create a new ranged property type
* @dev: drm device
* @flags: flags specifying the property type
* @name: name of the property
@@ -3703,6 +3980,25 @@ done:
return ret;
}
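+/**
+ * drm_mode_connector_set_path_property - set the PATH blob property
+ * @connector: connector to set the property on
+ * @path: topology path string for the connector
+ *
+ * Creates a blob property containing @path and points the connector's
+ * PATH property at it, so userspace can discover where the connector
+ * sits in a branch topology (used by DP MST).
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */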
+int drm_mode_connector_set_path_property(struct drm_connector *connector,
+ char *path)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+ size = strlen(path) + 1;
+
+ connector->path_blob_ptr = drm_property_create_blob(connector->dev,
+ size, path);
+ if (!connector->path_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.path_property,
+ connector->path_blob_ptr->base.id);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_path_property);
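
Editor's note: the MST helpers introduced later in this patch set build the
path string when they create a connector; the exact format is defined by
those helpers, so the sketch below only illustrates the call
(mgr->conn_base_id and port->port_num are fields of the MST helper
structures):

    char proppath[64];

    /* illustrative format; the real string is built by the MST helpers */
    snprintf(proppath, sizeof(proppath), "mst:%d-%d",
             mgr->conn_base_id, port->port_num);
    drm_mode_connector_set_path_property(connector, proppath);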
+
/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
@@ -3720,6 +4016,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
int ret, size;
+ /* ignore requests to set edid when overridden */
+ if (connector->override_edid)
+ return 0;
+
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
@@ -4680,6 +4980,36 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
/**
+ * drm_rotation_simplify() - Try to simplify the rotation
+ * @rotation: Rotation to be simplified
+ * @supported_rotations: Supported rotations
+ *
+ * Attempt to simplify the rotation to a form that is supported.
+ * E.g. if the hardware supports everything except DRM_REFLECT_X
+ * one could call this function like this:
+ *
+ * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
+ * BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
+ * BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
+ *
+ * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
+ * transforms the hardware supports, this function may not
+ * be able to produce a supported transform, so the caller should
+ * check the result afterwards.
+ */
+unsigned int drm_rotation_simplify(unsigned int rotation,
+ unsigned int supported_rotations)
+{
+ if (rotation & ~supported_rotations) {
+ rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
+ rotation = (rotation & ~0xf) | BIT((ffs(rotation & 0xf) + 1) % 4);
+ }
+
+ return rotation;
+}
+EXPORT_SYMBOL(drm_rotation_simplify);
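
Editor's note: concretely, reflecting about X is the same transform as
reflecting about Y combined with a 180 degree rotation, which is what the XOR
of both reflect bits plus the rotation adjustment above implements. Working
through the example from the comment:

    unsigned int rot;

    rot = drm_rotation_simplify(BIT(DRM_ROTATE_0) | BIT(DRM_REFLECT_X),
                                BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_90) |
                                BIT(DRM_ROTATE_180) | BIT(DRM_ROTATE_270) |
                                BIT(DRM_REFLECT_Y));
    /* rot == BIT(DRM_ROTATE_180) | BIT(DRM_REFLECT_Y) */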
+
+/**
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
@@ -4797,3 +5127,21 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+ unsigned int supported_rotations)
+{
+ static const struct drm_prop_enum_list props[] = {
+ { DRM_ROTATE_0, "rotate-0" },
+ { DRM_ROTATE_90, "rotate-90" },
+ { DRM_ROTATE_180, "rotate-180" },
+ { DRM_ROTATE_270, "rotate-270" },
+ { DRM_REFLECT_X, "reflect-x" },
+ { DRM_REFLECT_Y, "reflect-y" },
+ };
+
+ return drm_property_create_bitmask(dev, 0, "rotation",
+ props, ARRAY_SIZE(props),
+ supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
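
Editor's note: a driver then creates the property with exactly the subset its
hardware supports and attaches it per plane, e.g. for hardware limited to
0/180 degree rotation (a sketch; the initial value is an assumption):

    struct drm_property *prop;

    prop = drm_mode_create_rotation_property(dev, BIT(DRM_ROTATE_0) |
                                                  BIT(DRM_ROTATE_180));
    if (!prop)
            return -ENOMEM;

    drm_object_attach_property(&plane->base, prop, BIT(DRM_ROTATE_0));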
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 78b37f3febd3..6c65a0a28fbd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -818,6 +818,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
+ fb->flags = mode_cmd->flags;
}
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index b4b51d46f339..13bd42923dd4 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
+#include <drm/drm_edid.h>
#if defined(CONFIG_DEBUG_FS)
@@ -237,5 +238,186 @@ int drm_debugfs_cleanup(struct drm_minor *minor)
return 0;
}
+static int connector_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ const char *status;
+
+ switch (connector->force) {
+ case DRM_FORCE_ON:
+ status = "on\n";
+ break;
+
+ case DRM_FORCE_ON_DIGITAL:
+ status = "digital\n";
+ break;
+
+ case DRM_FORCE_OFF:
+ status = "off\n";
+ break;
+
+ case DRM_FORCE_UNSPECIFIED:
+ status = "unspecified\n";
+ break;
+
+ default:
+ return 0;
+ }
+
+ seq_puts(m, status);
+
+ return 0;
+}
+
+static int connector_open(struct inode *inode, struct file *file)
+{
+ struct drm_connector *connector = inode->i_private;
+
+ return single_open(file, connector_show, connector);
+}
+
+static ssize_t connector_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_connector *connector = m->private;
+ char buf[12];
+
+ if (len > sizeof(buf) - 1)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ if (!strcmp(buf, "on"))
+ connector->force = DRM_FORCE_ON;
+ else if (!strcmp(buf, "digital"))
+ connector->force = DRM_FORCE_ON_DIGITAL;
+ else if (!strcmp(buf, "off"))
+ connector->force = DRM_FORCE_OFF;
+ else if (!strcmp(buf, "unspecified"))
+ connector->force = DRM_FORCE_UNSPECIFIED;
+ else
+ return -EINVAL;
+
+ return len;
+}
+
+static int edid_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_property_blob *edid = connector->edid_blob_ptr;
+
+ if (connector->override_edid && edid)
+ seq_write(m, edid->data, edid->length);
+
+ return 0;
+}
+
+static int edid_open(struct inode *inode, struct file *file)
+{
+ struct drm_connector *connector = inode->i_private;
+
+ return single_open(file, edid_show, connector);
+}
+
+static ssize_t edid_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_connector *connector = m->private;
+ char *buf;
+ struct edid *edid;
+ int ret;
+
+ buf = memdup_user(ubuf, len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ edid = (struct edid *) buf;
+
+ if (len == 5 && !strncmp(buf, "reset", 5)) {
+ connector->override_edid = false;
+ ret = drm_mode_connector_update_edid_property(connector, NULL);
+ } else if (len < EDID_LENGTH ||
+ EDID_LENGTH * (1 + edid->extensions) > len) {
+ ret = -EINVAL;
+ } else {
+ connector->override_edid = false;
+ ret = drm_mode_connector_update_edid_property(connector, edid);
+ if (!ret)
+ connector->override_edid = true;
+ }
+
+ kfree(buf);
+
+ return (ret) ? ret : len;
+}
+
+static const struct file_operations drm_edid_fops = {
+ .owner = THIS_MODULE,
+ .open = edid_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = edid_write
+};
+
+
+static const struct file_operations drm_connector_fops = {
+ .owner = THIS_MODULE,
+ .open = connector_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = connector_write
+};
+
+int drm_debugfs_connector_add(struct drm_connector *connector)
+{
+ struct drm_minor *minor = connector->dev->primary;
+ struct dentry *root, *ent;
+
+ if (!minor->debugfs_root)
+ return -1;
+
+ root = debugfs_create_dir(connector->name, minor->debugfs_root);
+ if (!root)
+ return -ENOMEM;
+
+ connector->debugfs_entry = root;
+
+ /* force */
+ ent = debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
+ &drm_connector_fops);
+ if (!ent)
+ goto error;
+
+ /* edid */
+ ent = debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root,
+ connector, &drm_edid_fops);
+ if (!ent)
+ goto error;
+
+ return 0;
+
+error:
+ debugfs_remove_recursive(connector->debugfs_entry);
+ connector->debugfs_entry = NULL;
+ return -ENOMEM;
+}
+
+void drm_debugfs_connector_remove(struct drm_connector *connector)
+{
+ if (!connector->debugfs_entry)
+ return;
+
+ debugfs_remove_recursive(connector->debugfs_entry);
+
+ connector->debugfs_entry = NULL;
+}
+
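
Editor's note: with these files in place a connector's detection can be forced
from userspace by writing "on", "off", "digital" or "unspecified" to
/sys/kernel/debug/dri/<minor>/<connector-name>/force (assuming debugfs is
mounted at the usual /sys/kernel/debug), and an EDID override can be installed
by writing a raw EDID blob to the adjacent edid_override file, or cleared
again by writing the string "reset".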
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
new file mode 100644
index 000000000000..ac3c2738db94
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -0,0 +1,2715 @@
+/*
+ * Copyright © 2014 Red Hat
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/i2c.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drmP.h>
+
+#include <drm/drm_fixed.h>
+
+/**
+ * DOC: dp mst helper
+ *
+ * These functions implement parts of the DisplayPort 1.2a MultiStream
+ * Transport protocol. The helpers contain a topology manager and a
+ * bandwidth manager, and encapsulate the sending and receiving of
+ * sideband messages.
+ */
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+ char *buf);
+static int test_calc_pbn_mode(void);
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload);
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes);
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port);
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ u8 *guid);
+
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+/* sideband msg handling */
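+/*
+ * Sideband message headers end in a 4-bit CRC and message bodies in an
+ * 8-bit CRC; both are computed bit-serially below, reducing against the
+ * generator values 0x13 and 0xd5 respectively.
+ */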
+static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
+{
+ u8 bitmask = 0x80;
+ u8 bitshift = 7;
+ u8 array_index = 0;
+ int number_of_bits = num_nibbles * 4;
+ u8 remainder = 0;
+
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ remainder |= (data[array_index] & bitmask) >> bitshift;
+ bitmask >>= 1;
+ bitshift--;
+ if (bitmask == 0) {
+ bitmask = 0x80;
+ bitshift = 7;
+ array_index++;
+ }
+ if ((remainder & 0x10) == 0x10)
+ remainder ^= 0x13;
+ }
+
+ number_of_bits = 4;
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ if ((remainder & 0x10) != 0)
+ remainder ^= 0x13;
+ }
+
+ return remainder;
+}
+
+static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
+{
+ u8 bitmask = 0x80;
+ u8 bitshift = 7;
+ u8 array_index = 0;
+ int number_of_bits = number_of_bytes * 8;
+ u16 remainder = 0;
+
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ remainder |= (data[array_index] & bitmask) >> bitshift;
+ bitmask >>= 1;
+ bitshift--;
+ if (bitmask == 0) {
+ bitmask = 0x80;
+ bitshift = 7;
+ array_index++;
+ }
+ if ((remainder & 0x100) == 0x100)
+ remainder ^= 0xd5;
+ }
+
+ number_of_bits = 8;
+ while (number_of_bits != 0) {
+ number_of_bits--;
+ remainder <<= 1;
+ if ((remainder & 0x100) != 0)
+ remainder ^= 0xd5;
+ }
+
+ return remainder & 0xff;
+}
+static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
+{
+ u8 size = 3;
+ size += (hdr->lct / 2);
+ return size;
+}
+
+static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+ u8 *buf, int *len)
+{
+ int idx = 0;
+ int i;
+ u8 crc4;
+ buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
+ for (i = 0; i < (hdr->lct / 2); i++)
+ buf[idx++] = hdr->rad[i];
+ buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
+ (hdr->msg_len & 0x3f);
+ buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
+
+ crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
+ buf[idx - 1] |= (crc4 & 0xf);
+
+ *len = idx;
+}
+
+static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+ u8 *buf, int buflen, u8 *hdrlen)
+{
+ u8 crc4;
+ u8 len;
+ int i;
+ u8 idx;
+ if (buf[0] == 0)
+ return false;
+ len = 3;
+ len += ((buf[0] & 0xf0) >> 4) / 2;
+ if (len > buflen)
+ return false;
+ crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
+
+ if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
+ DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
+ return false;
+ }
+
+ hdr->lct = (buf[0] & 0xf0) >> 4;
+ hdr->lcr = (buf[0] & 0xf);
+ idx = 1;
+ for (i = 0; i < (hdr->lct / 2); i++)
+ hdr->rad[i] = buf[idx++];
+ hdr->broadcast = (buf[idx] >> 7) & 0x1;
+ hdr->path_msg = (buf[idx] >> 6) & 0x1;
+ hdr->msg_len = buf[idx] & 0x3f;
+ idx++;
+ hdr->somt = (buf[idx] >> 7) & 0x1;
+ hdr->eomt = (buf[idx] >> 6) & 0x1;
+ hdr->seqno = (buf[idx] >> 4) & 0x1;
+ idx++;
+ *hdrlen = idx;
+ return true;
+}
+
+static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
+ struct drm_dp_sideband_msg_tx *raw)
+{
+ int idx = 0;
+ int i;
+ u8 *buf = raw->msg;
+ buf[idx++] = req->req_type & 0x7f;
+
+ switch (req->req_type) {
+ case DP_ENUM_PATH_RESOURCES:
+ buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+ idx++;
+ break;
+ case DP_ALLOCATE_PAYLOAD:
+ buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
+ (req->u.allocate_payload.number_sdp_streams & 0xf);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.pbn >> 8);
+ idx++;
+ buf[idx] = (req->u.allocate_payload.pbn & 0xff);
+ idx++;
+ for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
+ buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
+ (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
+ idx++;
+ }
+ if (req->u.allocate_payload.number_sdp_streams & 1) {
+ i = req->u.allocate_payload.number_sdp_streams - 1;
+ buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
+ idx++;
+ }
+ break;
+ case DP_QUERY_PAYLOAD:
+ buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
+ idx++;
+ buf[idx] = (req->u.query_payload.vcpi & 0x7f);
+ idx++;
+ break;
+ case DP_REMOTE_DPCD_READ:
+ buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
+ buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
+ idx++;
+ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
+ idx++;
+ buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
+ idx++;
+ buf[idx] = (req->u.dpcd_read.num_bytes);
+ idx++;
+ break;
+
+ case DP_REMOTE_DPCD_WRITE:
+ buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
+ buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
+ idx++;
+ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
+ idx++;
+ buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
+ idx++;
+ buf[idx] = (req->u.dpcd_write.num_bytes);
+ idx++;
+ memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
+ idx += req->u.dpcd_write.num_bytes;
+ break;
+ case DP_REMOTE_I2C_READ:
+ buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
+ buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
+ idx++;
+ for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
+ buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
+ idx++;
+ buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
+ idx++;
+ memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
+ idx += req->u.i2c_read.transactions[i].num_bytes;
+
+ buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
+ buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
+ idx++;
+ }
+ buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
+ idx++;
+ buf[idx] = (req->u.i2c_read.num_bytes_read);
+ idx++;
+ break;
+
+ case DP_REMOTE_I2C_WRITE:
+ buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
+ idx++;
+ buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
+ idx++;
+ buf[idx] = (req->u.i2c_write.num_bytes);
+ idx++;
+ memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
+ idx += req->u.i2c_write.num_bytes;
+ break;
+ }
+ raw->cur_len = idx;
+}
+
+static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
+{
+ u8 crc4;
+ crc4 = drm_dp_msg_data_crc4(msg, len);
+ msg[len] = crc4;
+}
+
+static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
+ struct drm_dp_sideband_msg_tx *raw)
+{
+ int idx = 0;
+ u8 *buf = raw->msg;
+
+ buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
+
+ raw->cur_len = idx;
+}
+
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
+ u8 *replybuf, u8 replybuflen, bool hdr)
+{
+ int ret;
+ u8 crc4;
+
+ if (hdr) {
+ u8 hdrlen;
+ struct drm_dp_sideband_msg_hdr recv_hdr;
+ ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
+ if (ret == false) {
+ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
+ return false;
+ }
+
+ /* get length contained in this portion */
+ msg->curchunk_len = recv_hdr.msg_len;
+ msg->curchunk_hdrlen = hdrlen;
+
+ /* we have already received an SOMT - don't bother parsing */
+ if (recv_hdr.somt && msg->have_somt)
+ return false;
+
+ if (recv_hdr.somt) {
+ memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
+ msg->have_somt = true;
+ }
+ if (recv_hdr.eomt)
+ msg->have_eomt = true;
+
+ /* copy the bytes for the remainder of this header chunk */
+ msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
+ memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
+ } else {
+ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+ msg->curchunk_idx += replybuflen;
+ }
+
+ if (msg->curchunk_idx >= msg->curchunk_len) {
+ /* do CRC */
+ crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+ /* copy chunk into bigger msg */
+ memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
+ msg->curlen += msg->curchunk_len - 1;
+ }
+ return true;
+}
+
+static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ int i;
+ memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
+ idx += 16;
+ repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ for (i = 0; i < repmsg->u.link_addr.nports; i++) {
+ if (raw->msg[idx] & 0x80)
+ repmsg->u.link_addr.ports[i].input_port = 1;
+
+ repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
+ repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
+
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
+ repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
+ if (repmsg->u.link_addr.ports[i].input_port == 0)
+ repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ if (repmsg->u.link_addr.ports[i].input_port == 0) {
+ repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
+ repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
+ idx++;
+
+ }
+ if (idx > raw->curlen)
+ goto fail_len;
+ }
+
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+
+ repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
+ idx++;
+ /* TODO check */
+ memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.allocate_payload.vcpi = raw->msg[idx];
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+ repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+ repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+ idx += 2;
+ if (idx > raw->curlen)
+ goto fail_len;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->reply_type = (raw->msg[0] & 0x80) >> 7;
+ msg->req_type = (raw->msg[0] & 0x7f);
+
+ if (msg->reply_type) {
+ memcpy(msg->u.nak.guid, &raw->msg[1], 16);
+ msg->u.nak.reason = raw->msg[17];
+ msg->u.nak.nak_data = raw->msg[18];
+ return false;
+ }
+
+ switch (msg->req_type) {
+ case DP_LINK_ADDRESS:
+ return drm_dp_sideband_parse_link_address(raw, msg);
+ case DP_QUERY_PAYLOAD:
+ return drm_dp_sideband_parse_query_payload_ack(raw, msg);
+ case DP_REMOTE_DPCD_READ:
+ return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
+ case DP_REMOTE_DPCD_WRITE:
+ return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
+ case DP_REMOTE_I2C_READ:
+ return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+ case DP_ENUM_PATH_RESOURCES:
+ return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
+ case DP_ALLOCATE_PAYLOAD:
+ return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+ default:
+ DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
+ return false;
+ }
+}
+
+static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ int idx = 1;
+
+ msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
+ msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
+ msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
+ msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
+ msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
+ idx++;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ int idx = 1;
+
+ msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
+ idx++;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
+ idx += 16;
+ if (idx > raw->curlen)
+ goto fail_len;
+
+ msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
+ idx++;
+ return true;
+fail_len:
+ DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
+ return false;
+}
+
+static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+ msg->req_type = (raw->msg[0] & 0x7f);
+
+ switch (msg->req_type) {
+ case DP_CONNECTION_STATUS_NOTIFY:
+ return drm_dp_sideband_parse_connection_status_notify(raw, msg);
+ case DP_RESOURCE_STATUS_NOTIFY:
+ return drm_dp_sideband_parse_resource_status_notify(raw, msg);
+ default:
+ DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
+ return false;
+ }
+}
+
+static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_REMOTE_DPCD_WRITE;
+ req.u.dpcd_write.port_number = port_num;
+ req.u.dpcd_write.dpcd_address = offset;
+ req.u.dpcd_write.num_bytes = num_bytes;
+ req.u.dpcd_write.bytes = bytes;
+ drm_dp_encode_sideband_req(&req, msg);
+
+ return 0;
+}
+
+static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_LINK_ADDRESS;
+ drm_dp_encode_sideband_req(&req, msg);
+ return 0;
+}
+
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_ENUM_PATH_RESOURCES;
+ req.u.port_num.port_number = port_num;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
+static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
+ u8 vcpi, uint16_t pbn)
+{
+ struct drm_dp_sideband_msg_req_body req;
+ memset(&req, 0, sizeof(req));
+ req.req_type = DP_ALLOCATE_PAYLOAD;
+ req.u.allocate_payload.port_number = port_num;
+ req.u.allocate_payload.vcpi = vcpi;
+ req.u.allocate_payload.pbn = pbn;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
+static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_vcpi *vcpi)
+{
+ int ret;
+
+ mutex_lock(&mgr->payload_lock);
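+ /*
+ * payload/VCPI ids are 1-based: id 0 is reserved (note the id - 1
+ * indexing below and the early return in drm_dp_mst_put_payload_id).
+ */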
+ ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
+ if (ret > mgr->max_payloads) {
+ ret = -EINVAL;
+ DRM_DEBUG_KMS("out of payload ids %d\n", ret);
+ goto out_unlock;
+ }
+
+ set_bit(ret, &mgr->payload_mask);
+ vcpi->vcpi = ret;
+ mgr->proposed_vcpis[ret - 1] = vcpi;
+out_unlock:
+ mutex_unlock(&mgr->payload_lock);
+ return ret;
+}
+
+static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
+ int id)
+{
+ if (id == 0)
+ return;
+
+ mutex_lock(&mgr->payload_lock);
+ DRM_DEBUG_KMS("putting payload %d\n", id);
+ clear_bit(id, &mgr->payload_mask);
+ mgr->proposed_vcpis[id - 1] = NULL;
+ mutex_unlock(&mgr->payload_lock);
+}
+
+static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ bool ret;
+ mutex_lock(&mgr->qlock);
+ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
+ mutex_unlock(&mgr->qlock);
+ return ret;
+}
+
+static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ int ret;
+
+ ret = wait_event_timeout(mgr->tx_waitq,
+ check_txmsg_state(mgr, txmsg),
+ (4 * HZ));
+ mutex_lock(&mstb->mgr->qlock);
+ if (ret > 0) {
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
+ ret = -EIO;
+ goto out;
+ }
+ } else {
+ DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
+
+ /* dump some state */
+ ret = -EIO;
+
+ /* remove from q */
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+ list_del(&txmsg->next);
+ }
+
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
+ mstb->tx_slots[txmsg->seqno] = NULL;
+ }
+ }
+out:
+ mutex_unlock(&mgr->qlock);
+
+ return ret;
+}
+
+static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
+{
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
+ if (!mstb)
+ return NULL;
+
+ mstb->lct = lct;
+ if (lct > 1)
+ memcpy(mstb->rad, rad, lct / 2);
+ INIT_LIST_HEAD(&mstb->ports);
+ kref_init(&mstb->kref);
+ return mstb;
+}
+
+static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+{
+ struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ struct drm_dp_mst_port *port, *tmp;
+ bool wake_tx = false;
+
+ cancel_work_sync(&mstb->mgr->work);
+
+ /*
+ * destroy all ports - don't need lock
+ * as there are no more references to the mst branch
+ * device at this point.
+ */
+ list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_del(&port->next);
+ drm_dp_put_port(port);
+ }
+
+ /* drop any tx slots msg */
+ mutex_lock(&mstb->mgr->qlock);
+ if (mstb->tx_slots[0]) {
+ mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[0] = NULL;
+ wake_tx = true;
+ }
+ if (mstb->tx_slots[1]) {
+ mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ mstb->tx_slots[1] = NULL;
+ wake_tx = true;
+ }
+ mutex_unlock(&mstb->mgr->qlock);
+
+ if (wake_tx)
+ wake_up(&mstb->mgr->tx_waitq);
+ kfree(mstb);
+}
+
+static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+{
+ kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+}
+
+
+static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
+{
+ switch (old_pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /* remove i2c over sideband */
+ drm_dp_mst_unregister_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ drm_dp_put_mst_branch_device(port->mstb);
+ port->mstb = NULL;
+ break;
+ }
+}
+
+static void drm_dp_destroy_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ if (!port->input) {
+ port->vcpi.num_slots = 0;
+ if (port->connector)
+ (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
+ drm_dp_port_teardown_pdt(port, port->pdt);
+
+ if (!port->input && port->vcpi.vcpi > 0)
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ }
+ kfree(port);
+
+ (*mgr->cbs->hotplug)(mgr);
+}
+
+static void drm_dp_put_port(struct drm_dp_mst_port *port)
+{
+ kref_put(&port->kref, drm_dp_destroy_port);
+}
+
+static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+{
+ struct drm_dp_mst_port *port;
+ struct drm_dp_mst_branch *rmstb;
+ if (to_find == mstb) {
+ kref_get(&mstb->kref);
+ return mstb;
+ }
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->mstb) {
+ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+ if (rmstb)
+ return rmstb;
+ }
+ }
+ return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_branch *rmstb = NULL;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+ mutex_unlock(&mgr->lock);
+ return rmstb;
+}
+
+static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+{
+ struct drm_dp_mst_port *port, *mport;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port == to_find) {
+ kref_get(&port->kref);
+ return port;
+ }
+ if (port->mstb) {
+ mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+ if (mport)
+ return mport;
+ }
+ }
+ return NULL;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ struct drm_dp_mst_port *rport = NULL;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+ mutex_unlock(&mgr->lock);
+ return rport;
+}
+
+static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
+{
+ struct drm_dp_mst_port *port;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+ kref_get(&port->kref);
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Calculate a new RAD for this MST branch device.
+ * If the parent has an LCT of 2 it has 1 nibble of RAD;
+ * if the parent has an LCT of 3 it has 2 nibbles of RAD.
+ */
+static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+ u8 *rad)
+{
+ int parent_lct = port->parent->lct;
+ int shift = 4;
+ /* the new nibble comes after the parent's (parent_lct - 1) nibbles */
+ int idx = (parent_lct - 1) / 2;
+ if (parent_lct > 1) {
+ memcpy(rad, port->parent->rad, idx + 1);
+ shift = (parent_lct % 2) ? 4 : 0;
+ } else
+ rad[0] = 0;
+
+ rad[idx] |= port->port_num << shift;
+ return parent_lct + 1;
+}
+
+/*
+ * returns true if the caller should send a link address to the new mstb
+ */
+static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+{
+ int ret;
+ u8 rad[6], lct;
+ bool send_link = false;
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ case DP_PEER_DEVICE_SST_SINK:
+ /* add i2c over sideband */
+ ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ break;
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ lct = drm_dp_calculate_rad(port, rad);
+
+ port->mstb = drm_dp_add_mst_branch_device(lct, rad);
+ if (port->mstb) {
+ port->mstb->mgr = port->mgr;
+ port->mstb->port_parent = port;
+
+ send_link = true;
+ }
+ break;
+ }
+ return send_link;
+}
+
+static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ int ret;
+ if (port->dpcd_rev >= 0x12) {
+ port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
+ if (!port->guid_valid) {
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
+ port,
+ DP_GUID,
+ 16, port->guid);
+ port->guid_valid = true;
+ }
+ }
+}
+
+static void build_mst_prop_path(struct drm_dp_mst_port *port,
+ struct drm_dp_mst_branch *mstb,
+ char *proppath)
+{
+ int i;
+ char temp[8];
+ snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
+ for (i = 0; i < (mstb->lct - 1); i++) {
+ int shift = (i % 2) ? 0 : 4;
+ int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+ snprintf(temp, sizeof(temp), "-%d", port_num);
+ /* strncat takes the remaining space, not the total buffer size */
+ strncat(proppath, temp, 255 - strlen(proppath) - 1);
+ }
+ snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ strncat(proppath, temp, 255 - strlen(proppath) - 1);
+}
+
+static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ struct device *dev,
+ struct drm_dp_link_addr_reply_port *port_msg)
+{
+ struct drm_dp_mst_port *port;
+ bool ret;
+ bool created = false;
+ int old_pdt = 0;
+ int old_ddps = 0;
+ port = drm_dp_get_port(mstb, port_msg->port_number);
+ if (!port) {
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return;
+ kref_init(&port->kref);
+ port->parent = mstb;
+ port->port_num = port_msg->port_number;
+ port->mgr = mstb->mgr;
+ port->aux.name = "DPMST";
+ port->aux.dev = dev;
+ created = true;
+ } else {
+ old_pdt = port->pdt;
+ old_ddps = port->ddps;
+ }
+
+ port->pdt = port_msg->peer_device_type;
+ port->input = port_msg->input_port;
+ port->mcs = port_msg->mcs;
+ port->ddps = port_msg->ddps;
+ port->ldps = port_msg->legacy_device_plug_status;
+ port->dpcd_rev = port_msg->dpcd_revision;
+ port->num_sdp_streams = port_msg->num_sdp_streams;
+ port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
+ memcpy(port->guid, port_msg->peer_guid, 16);
+
+ /* manage mstb port lists with mgr lock - take a reference
+ * for this list
+ */
+ if (created) {
+ mutex_lock(&mstb->mgr->lock);
+ kref_get(&port->kref);
+ list_add(&port->next, &mstb->ports);
+ mutex_unlock(&mstb->mgr->lock);
+ }
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+ drm_dp_check_port_guid(mstb, port);
+ if (!port->input)
+ drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ } else {
+ port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+
+ if (old_pdt != port->pdt && !port->input) {
+ drm_dp_port_teardown_pdt(port, old_pdt);
+
+ ret = drm_dp_port_setup_pdt(port);
+ if (ret) {
+ drm_dp_send_link_address(mstb->mgr, port->mstb);
+ port->mstb->link_address_sent = true;
+ }
+ }
+
+ if (created && !port->input) {
+ char proppath[255];
+ build_mst_prop_path(port, mstb, proppath);
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ }
+
+ /* put reference to this port */
+ drm_dp_put_port(port);
+}
+
+static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_connection_status_notify *conn_stat)
+{
+ struct drm_dp_mst_port *port;
+ int old_pdt;
+ int old_ddps;
+ bool dowork = false;
+ port = drm_dp_get_port(mstb, conn_stat->port_number);
+ if (!port)
+ return;
+
+ old_ddps = port->ddps;
+ old_pdt = port->pdt;
+ port->pdt = conn_stat->peer_device_type;
+ port->mcs = conn_stat->message_capability_status;
+ port->ldps = conn_stat->legacy_device_plug_status;
+ port->ddps = conn_stat->displayport_device_plug_status;
+
+ if (old_ddps != port->ddps) {
+ if (port->ddps) {
+ drm_dp_check_port_guid(mstb, port);
+ dowork = true;
+ } else {
+ port->guid_valid = false;
+ port->available_pbn = 0;
+ }
+ }
+ if (old_pdt != port->pdt && !port->input) {
+ drm_dp_port_teardown_pdt(port, old_pdt);
+
+ if (drm_dp_port_setup_pdt(port))
+ dowork = true;
+ }
+
+ drm_dp_put_port(port);
+ if (dowork)
+ queue_work(system_long_wq, &mstb->mgr->work);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
+ u8 lct, u8 *rad)
+{
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_port *port;
+ int i;
+ /* find the port by iterating down */
+ mstb = mgr->mst_primary;
+
+ for (i = 0; i < lct - 1; i++) {
+ int shift = (i % 2) ? 0 : 4;
+ int port_num = (rad[i / 2] >> shift) & 0xf;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->port_num == port_num) {
+ if (!port->mstb) {
+ DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
+ return NULL;
+ }
+
+ mstb = port->mstb;
+ break;
+ }
+ }
+ }
+ kref_get(&mstb->kref);
+ return mstb;
+}
+
+static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+
+ if (!mstb->link_address_sent) {
+ drm_dp_send_link_address(mgr, mstb);
+ mstb->link_address_sent = true;
+ }
+ list_for_each_entry(port, &mstb->ports, next) {
+ if (port->input)
+ continue;
+
+ if (!port->ddps)
+ continue;
+
+ if (!port->available_pbn)
+ drm_dp_send_enum_path_resources(mgr, mstb, port);
+
+ if (port->mstb)
+ drm_dp_check_and_send_link_address(mgr, port->mstb);
+ }
+}
+
+static void drm_dp_mst_link_probe_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
+
+ drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
+}
+
+static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+ u8 *guid)
+{
+ static u8 zero_guid[16];
+
+ if (!memcmp(guid, zero_guid, 16)) {
+ u64 salt = get_jiffies_64();
+ memcpy(&guid[0], &salt, sizeof(u64));
+ memcpy(&guid[8], &salt, sizeof(u64));
+ return false;
+ }
+ return true;
+}
+
+#if 0
+static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ req.req_type = DP_REMOTE_DPCD_READ;
+ req.u.dpcd_read.port_number = port_num;
+ req.u.dpcd_read.dpcd_address = offset;
+ req.u.dpcd_read.num_bytes = num_bytes;
+ drm_dp_encode_sideband_req(&req, msg);
+
+ return 0;
+}
+#endif
+
+static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
+ bool up, u8 *msg, int len)
+{
+ int ret;
+ int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
+ int tosend, total, offset;
+ int retries = 0;
+
+retry:
+ total = len;
+ offset = 0;
+ do {
+ tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
+
+ ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
+ &msg[offset],
+ tosend);
+ if (ret != tosend) {
+ if (ret == -EIO && retries < 5) {
+ retries++;
+ goto retry;
+ }
+ DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
+ WARN(1, "sideband dpcd write failed\n");
+
+ return -EIO;
+ }
+ offset += tosend;
+ total -= tosend;
+ } while (total > 0);
+ return 0;
+}
+
+static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ struct drm_dp_mst_branch *mstb = txmsg->dst;
+
+ /* pick a tx slot if this message doesn't have one assigned yet */
+ if (txmsg->seqno == -1) {
+ if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
+ DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
+ return -EAGAIN;
+ }
+ if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
+ txmsg->seqno = mstb->last_seqno;
+ mstb->last_seqno ^= 1;
+ } else if (mstb->tx_slots[0] == NULL)
+ txmsg->seqno = 0;
+ else
+ txmsg->seqno = 1;
+ mstb->tx_slots[txmsg->seqno] = txmsg;
+ }
+ hdr->broadcast = 0;
+ hdr->path_msg = txmsg->path_msg;
+ hdr->lct = mstb->lct;
+ hdr->lcr = mstb->lct - 1;
+ if (mstb->lct > 1)
+ memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
+ hdr->seqno = txmsg->seqno;
+ return 0;
+}
+/*
+ * process a single block of the next message in the sideband queue
+ */
+static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg,
+ bool up)
+{
+ u8 chunk[48];
+ struct drm_dp_sideband_msg_hdr hdr;
+ int len, space, idx, tosend;
+ int ret;
+
+ memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
+
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
+ txmsg->seqno = -1;
+ txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
+ }
+
+ /* make hdr from dst mst - for replies use seqno
+ * otherwise assign one
+ */
+ ret = set_hdr_from_dst_qlock(&hdr, txmsg);
+ if (ret < 0)
+ return ret;
+
+ /* amount left to send in this message */
+ len = txmsg->cur_len - txmsg->cur_offset;
+
+ /* 48 bytes max sideband msg size - 1 byte for the data CRC - header bytes */
+ space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
+
+ tosend = min(len, space);
+ if (len == txmsg->cur_len)
+ hdr.somt = 1;
+ if (space >= len)
+ hdr.eomt = 1;
+
+ hdr.msg_len = tosend + 1;
+ drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
+ memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
+ /* add crc at end */
+ drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
+ idx += tosend + 1;
+
+ ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
+ if (ret) {
+ DRM_DEBUG_KMS("sideband msg failed to send\n");
+ return ret;
+ }
+
+ txmsg->cur_offset += tosend;
+ if (txmsg->cur_offset == txmsg->cur_len) {
+ txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
+ return 1;
+ }
+ return 0;
+}
+
+/* must be called holding qlock */
+static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+ if (list_empty(&mgr->tx_msg_downq)) {
+ mgr->tx_down_in_progress = false;
+ return;
+ }
+ mgr->tx_down_in_progress = true;
+
+ txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, false);
+ if (ret == 1) {
+ /* txmsg is sent; it should be in the slots now */
+ list_del(&txmsg->next);
+ } else if (ret) {
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ list_del(&txmsg->next);
+ if (txmsg->seqno != -1)
+ txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ wake_up(&mgr->tx_waitq);
+ }
+ if (list_empty(&mgr->tx_msg_downq)) {
+ mgr->tx_down_in_progress = false;
+ return;
+ }
+}
+
+/* called holding qlock */
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ /* construct a chunk from the first msg in the tx_msg queue */
+ if (list_empty(&mgr->tx_msg_upq)) {
+ mgr->tx_up_in_progress = false;
+ return;
+ }
+
+ txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+ ret = process_single_tx_qlock(mgr, txmsg, true);
+ if (ret == 1) {
+ /* up txmsgs aren't put in slots - so free after we send it */
+ list_del(&txmsg->next);
+ kfree(txmsg);
+ } else if (ret)
+ DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ mgr->tx_up_in_progress = !list_empty(&mgr->tx_msg_upq);
+}
+
+static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_tx *txmsg)
+{
+ mutex_lock(&mgr->qlock);
+ list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
+ if (!mgr->tx_down_in_progress)
+ process_single_down_tx_qlock(mgr);
+ mutex_unlock(&mgr->qlock);
+}
+
+static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ len = build_link_address(txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ int i;
+
+ if (txmsg->reply.reply_type == 1)
+ DRM_DEBUG_KMS("link address nak received\n");
+ else {
+ DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
+ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
+ txmsg->reply.u.link_addr.ports[i].input_port,
+ txmsg->reply.u.link_addr.ports[i].peer_device_type,
+ txmsg->reply.u.link_addr.ports[i].port_number,
+ txmsg->reply.u.link_addr.ports[i].dpcd_revision,
+ txmsg->reply.u.link_addr.ports[i].mcs,
+ txmsg->reply.u.link_addr.ports[i].ddps,
+ txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
+ txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
+ }
+ for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
+ }
+ (*mgr->cbs->hotplug)(mgr);
+ }
+ } else
+ DRM_DEBUG_KMS("link address failed %d\n", ret);
+
+ kfree(txmsg);
+ return 0;
+}
+
+static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *port)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int ret;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ len = build_enum_path_resources(txmsg, port->port_num);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1)
+ DRM_DEBUG_KMS("enum path resources nak received\n");
+ else {
+ if (port->port_num != txmsg->reply.u.path_resources.port_number)
+ DRM_ERROR("got incorrect port in response\n");
+ DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
+ txmsg->reply.u.path_resources.avail_payload_bw_number);
+ port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
+ }
+ }
+
+ kfree(txmsg);
+ return 0;
+}
+
+static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ int pbn)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ int len, ret;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
+
+ txmsg->dst = mstb;
+ len = build_allocate_payload(txmsg, port->port_num,
+ id,
+ pbn);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1) {
+ ret = -EINVAL;
+ } else
+ ret = 0;
+ }
+ kfree(txmsg);
+fail_put:
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ int ret;
+
+ ret = drm_dp_dpcd_write_payload(mgr, id, payload);
+ if (ret < 0) {
+ payload->payload_state = 0;
+ return ret;
+ }
+ payload->payload_state = DP_PAYLOAD_LOCAL;
+ return 0;
+}
+
+static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ int ret;
+ ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
+ if (ret < 0)
+ return ret;
+ payload->payload_state = DP_PAYLOAD_REMOTE;
+ return ret;
+}
+
+static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ DRM_DEBUG_KMS("\n");
+ /* it's okay for these to fail */
+ if (port) {
+ drm_dp_payload_send_msg(mgr, port, id, 0);
+ }
+
+ drm_dp_dpcd_write_payload(mgr, id, payload);
+ payload->payload_state = 0;
+ return 0;
+}
+
+static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+ int id,
+ struct drm_dp_payload *payload)
+{
+ payload->payload_state = 0;
+ return 0;
+}
+
+/**
+ * drm_dp_update_payload_part1() - Execute payload update part 1
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels, and tries to
+ * allocate space in the link for them. For 0->slots transitions,
+ * this step just writes the VCPI to the MST device. For slots->0
+ * transitions, this writes the updated VCPIs and removes the
+ * remote VC payloads.
+ *
+ * After calling this, the driver should generate ACT and payload
+ * packets.
+ */
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int i;
+ int cur_slots = 1;
+ struct drm_dp_payload req_payload;
+ struct drm_dp_mst_port *port;
+
+ mutex_lock(&mgr->payload_lock);
+ for (i = 0; i < mgr->max_payloads; i++) {
+ /* solve the current payloads - compare to the hw ones
+ * - update the hw view
+ */
+ req_payload.start_slot = cur_slots;
+ if (mgr->proposed_vcpis[i]) {
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ } else {
+ port = NULL;
+ req_payload.num_slots = 0;
+ }
+ /* work out what is required to happen with this payload */
+ if (mgr->payloads[i].start_slot != req_payload.start_slot ||
+ mgr->payloads[i].num_slots != req_payload.num_slots) {
+
+ /* need to push an update for this payload */
+ if (req_payload.num_slots) {
+ drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
+ mgr->payloads[i].num_slots = req_payload.num_slots;
+ } else if (mgr->payloads[i].num_slots) {
+ mgr->payloads[i].num_slots = 0;
+ drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
+ req_payload.payload_state = mgr->payloads[i].payload_state;
+ } else
+ req_payload.payload_state = 0;
+
+ mgr->payloads[i].start_slot = req_payload.start_slot;
+ mgr->payloads[i].payload_state = req_payload.payload_state;
+ }
+ cur_slots += req_payload.num_slots;
+ }
+ mutex_unlock(&mgr->payload_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part1);
+
+/**
+ * drm_dp_update_payload_part2() - Execute payload update part 2
+ * @mgr: manager to use.
+ *
+ * This iterates over all proposed virtual channels. For 0->slots
+ * transitions, this step sends the remote VC payload commands. For
+ * slots->0 transitions, this just resets some internal state.
+ */
+int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_mst_port *port;
+ int i;
+ int ret = 0;
+ mutex_lock(&mgr->payload_lock);
+ for (i = 0; i < mgr->max_payloads; i++) {
+
+ if (!mgr->proposed_vcpis[i])
+ continue;
+
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+
+ DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
+ if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
+ ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
+ } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
+ ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
+ }
+ if (ret) {
+ mutex_unlock(&mgr->payload_lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&mgr->payload_lock);
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_update_payload_part2);
+
+#if 0 /* unused as of yet */
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size)
+{
+ int len;
+ struct drm_dp_sideband_msg_tx *txmsg;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ len = build_dpcd_read(txmsg, port->port_num, offset, size);
+ txmsg->dst = port->parent;
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ return 0;
+}
+#endif
+
+static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes)
+{
+ int len;
+ int ret;
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
+
+ len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+ txmsg->dst = mstb;
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1) {
+ ret = -EINVAL;
+ } else
+ ret = 0;
+ }
+ kfree(txmsg);
+fail_put:
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
+{
+ struct drm_dp_sideband_msg_reply_body reply;
+
+ reply.reply_type = 1;
+ reply.req_type = req_type;
+ drm_dp_encode_sideband_reply(&reply, msg);
+ return 0;
+}
+
+static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int req_type, int seqno, bool broadcast)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg)
+ return -ENOMEM;
+
+ txmsg->dst = mstb;
+ txmsg->seqno = seqno;
+ drm_dp_encode_up_ack_reply(txmsg, req_type);
+
+ mutex_lock(&mgr->qlock);
+ list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+ if (!mgr->tx_up_in_progress) {
+ process_single_up_tx_qlock(mgr);
+ }
+ mutex_unlock(&mgr->qlock);
+ return 0;
+}
+
+static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
+{
+ switch (dp_link_bw) {
+ case DP_LINK_BW_1_62:
+ return 3 * dp_link_count;
+ case DP_LINK_BW_2_7:
+ return 5 * dp_link_count;
+ case DP_LINK_BW_5_4:
+ return 10 * dp_link_count;
+ }
+ return 0;
+}
+
+/**
+ * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
+ * @mgr: manager to set state for
+ * @mst_state: true to enable MST on this connector - false to disable.
+ *
+ * This is called by the driver when it detects an MST capable device plugged
+ * into a DP MST capable port, or when a DP MST capable device is unplugged.
+ */
+int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
+{
+ int ret = 0;
+ struct drm_dp_mst_branch *mstb = NULL;
+
+ mutex_lock(&mgr->lock);
+ if (mst_state == mgr->mst_state)
+ goto out_unlock;
+
+ mgr->mst_state = mst_state;
+ /* set the device into MST mode */
+ if (mst_state) {
+ WARN_ON(mgr->mst_primary);
+
+ /* get dpcd info */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (ret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("failed to read DPCD\n");
+ goto out_unlock;
+ }
+
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ mgr->total_pbn = 2560;
+ mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
+ mgr->avail_slots = mgr->total_slots;
+
+ /* add initial branch device at LCT 1 */
+ mstb = drm_dp_add_mst_branch_device(1, NULL);
+ if (mstb == NULL) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ mstb->mgr = mgr;
+
+ /* give this the main reference */
+ mgr->mst_primary = mstb;
+ kref_get(&mgr->mst_primary->kref);
+
+ {
+ struct drm_dp_payload reset_pay;
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ goto out_unlock;
+ }
+
+
+ /* sort out guid */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
+ if (ret != 16) {
+ DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
+ goto out_unlock;
+ }
+
+ mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
+ if (!mgr->guid_valid) {
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
+ mgr->guid_valid = true;
+ }
+
+ queue_work(system_long_wq, &mgr->work);
+
+ ret = 0;
+ } else {
+ /* disable MST on the device */
+ mstb = mgr->mst_primary;
+ mgr->mst_primary = NULL;
+ /* this can fail if the device is gone */
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
+ ret = 0;
+ memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ mgr->payload_mask = 0;
+ set_bit(0, &mgr->payload_mask);
+ }
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
+
+/**
+ * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
+ * @mgr: manager to suspend
+ *
+ * This function tells the MST device that we can't handle UP messages
+ * anymore. This should stop it from sending any since we are suspended.
+ */
+void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
+
+/**
+ * drm_dp_mst_topology_mgr_resume() - resume the MST manager
+ * @mgr: manager to resume
+ *
+ * This will fetch the DPCD and see if the device is still there;
+ * if it is, it will rewrite the MSTM control bits, and return.
+ *
+ * If the device has gone away, this returns -1, and the driver should
+ * do a full MST reprobe, in case we were undocked.
+ */
+int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+
+ if (mgr->mst_primary) {
+ int sret;
+ sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ if (sret != DP_RECEIVER_CAP_SIZE) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ ret = 0;
+ } else
+ ret = -1;
+
+out_unlock:
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+
+static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+{
+ int len;
+ u8 replyblock[32];
+ int replylen, origlen, curreply;
+ int ret;
+ struct drm_dp_sideband_msg_rx *msg;
+ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
+ msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+
+ len = min(mgr->max_dpcd_transaction_bytes, 16);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg,
+ replyblock, len);
+ if (ret != len) {
+ DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
+ return;
+ }
+ ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+ if (!ret) {
+ DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
+ return;
+ }
+ replylen = msg->curchunk_len + msg->curchunk_hdrlen;
+
+ origlen = replylen;
+ replylen -= len;
+ curreply = len;
+ while (replylen > 0) {
+ len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
+ replyblock, len);
+ if (ret != len) {
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", len, ret);
+ return;
+ }
+ ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+ if (ret == false)
+ DRM_DEBUG_KMS("failed to build sideband msg\n");
+ curreply += len;
+ replylen -= len;
+ }
+}
+
+static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+
+ drm_dp_get_one_sb_msg(mgr, false);
+
+ if (mgr->down_rep_recv.have_eomt) {
+ struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+ int slot = -1;
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->down_rep_recv.initial_hdr.lct,
+ mgr->down_rep_recv.initial_hdr.rad);
+
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ /* find the message */
+ slot = mgr->down_rep_recv.initial_hdr.seqno;
+ mutex_lock(&mgr->qlock);
+ txmsg = mstb->tx_slots[slot];
+ /* remove from slots */
+ mutex_unlock(&mgr->qlock);
+
+ if (!txmsg) {
+ DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb,
+ mgr->down_rep_recv.initial_hdr.seqno,
+ mgr->down_rep_recv.initial_hdr.lct,
+ mgr->down_rep_recv.initial_hdr.rad[0],
+ mgr->down_rep_recv.msg[0]);
+ drm_dp_put_mst_branch_device(mstb);
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ if (txmsg->reply.reply_type == 1) {
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
+ }
+
+ memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+
+ mutex_lock(&mgr->qlock);
+ txmsg->state = DRM_DP_SIDEBAND_TX_RX;
+ mstb->tx_slots[slot] = NULL;
+ mutex_unlock(&mgr->qlock);
+
+ /* drop the reference only after we're done touching mstb's slots */
+ drm_dp_put_mst_branch_device(mstb);
+
+ wake_up(&mgr->tx_waitq);
+ }
+ return ret;
+}
+
+static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret = 0;
+ drm_dp_get_one_sb_msg(mgr, true);
+
+ if (mgr->up_req_recv.have_eomt) {
+ struct drm_dp_sideband_msg_req_body msg;
+ struct drm_dp_mst_branch *mstb;
+ bool seqno;
+ mstb = drm_dp_get_mst_branch_device(mgr,
+ mgr->up_req_recv.initial_hdr.lct,
+ mgr->up_req_recv.initial_hdr.rad);
+ if (!mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
+
+ seqno = mgr->up_req_recv.initial_hdr.seqno;
+ drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+
+ if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ drm_dp_update_port(mstb, &msg.u.conn_stat);
+ DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ (*mgr->cbs->hotplug)(mgr);
+
+ } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+ drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+ DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ }
+
+ drm_dp_put_mst_branch_device(mstb);
+ memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ }
+ return ret;
+}
+
+/**
+ * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
+ * @mgr: manager to notify irq for.
+ * @esi: 4 bytes from SINK_COUNT_ESI
+ *
+ * This should be called from the driver when it detects a short IRQ,
+ * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
+ * topology manager will process the sideband messages received as a result
+ * of this.
+ */
+int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
+{
+ int ret = 0;
+ int sc;
+ *handled = false;
+ sc = esi[0] & 0x3f;
+
+ if (sc != mgr->sink_count) {
+ mgr->sink_count = sc;
+ *handled = true;
+ }
+
+ if (esi[1] & DP_DOWN_REP_MSG_RDY) {
+ ret = drm_dp_mst_handle_down_rep(mgr);
+ *handled = true;
+ }
+
+ if (esi[1] & DP_UP_REQ_MSG_RDY) {
+ ret |= drm_dp_mst_handle_up_req(mgr);
+ *handled = true;
+ }
+
+ drm_dp_mst_kick_tx(mgr);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
+
+/**
+ * drm_dp_mst_detect_port() - get connection status for an MST port
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port
+ *
+ * This returns the current connection state for a port. It validates the
+ * port pointer still exists so the caller doesn't require a reference.
+ */
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /* we need to search for the port in the mgr in case its gone */
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return connector_status_disconnected;
+
+ if (!port->ddps)
+ goto out;
+
+ switch (port->pdt) {
+ case DP_PEER_DEVICE_NONE:
+ case DP_PEER_DEVICE_MST_BRANCHING:
+ break;
+
+ case DP_PEER_DEVICE_SST_SINK:
+ status = connector_status_connected;
+ break;
+ case DP_PEER_DEVICE_DP_LEGACY_CONV:
+ if (port->ldps)
+ status = connector_status_connected;
+ break;
+ }
+out:
+ drm_dp_put_port(port);
+ return status;
+}
+EXPORT_SYMBOL(drm_dp_mst_detect_port);
+
+/**
+ * drm_dp_mst_get_edid() - get EDID for an MST port
+ * @connector: toplevel connector to get EDID for
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This returns an EDID for the port connected to a connector.
+ * It validates the pointer still exists so the caller doesn't require a
+ * reference.
+ */
+struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ struct edid *edid = NULL;
+
+ /* we need to search for the port in the mgr in case its gone */
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return NULL;
+
+ edid = drm_get_edid(connector, &port->aux.ddc);
+ drm_dp_put_port(port);
+ return edid;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_edid);
+
+/**
+ * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * @mgr: manager to use
+ * @pbn: payload bandwidth to convert into slots.
+ */
+int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
+ int pbn)
+{
+ int num_slots;
+
+ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+ if (num_slots > mgr->avail_slots)
+ return -ENOSPC;
+ return num_slots;
+}
+EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
+
+static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_vcpi *vcpi, int pbn)
+{
+ int num_slots;
+ int ret;
+
+ num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+ if (num_slots > mgr->avail_slots)
+ return -ENOSPC;
+
+ vcpi->pbn = pbn;
+ vcpi->aligned_pbn = num_slots * mgr->pbn_div;
+ vcpi->num_slots = num_slots;
+
+ ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
+ * @mgr: manager for this port
+ * @port: port to allocate a virtual channel for.
+ * @pbn: payload bandwidth number to request
+ * @slots: returned number of slots for this PBN.
+ */
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
+{
+ int ret;
+
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return false;
+
+ if (port->vcpi.vcpi > 0) {
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ if (pbn == port->vcpi.pbn) {
+ *slots = port->vcpi.num_slots;
+ drm_dp_put_port(port);
+ return true;
+ }
+ }
+
+ ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
+ goto out;
+ }
+ DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
+ *slots = port->vcpi.num_slots;
+
+ drm_dp_put_port(port);
+ return true;
+out:
+ drm_dp_put_port(port);
+ return false;
+}
+EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
+
+/**
+ * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
+ * @mgr: manager for this port
+ * @port: unverified pointer to a port.
+ *
+ * This just resets the number of slots for the ports VCPI for later programming.
+ */
+void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return;
+ port->vcpi.num_slots = 0;
+ drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
+
+/**
+ * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
+ * @mgr: manager for this port
+ * @port: unverified port to deallocate vcpi for
+ */
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return;
+
+ drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ port->vcpi.num_slots = 0;
+ port->vcpi.pbn = 0;
+ port->vcpi.aligned_pbn = 0;
+ port->vcpi.vcpi = 0;
+ drm_dp_put_port(port);
+}
+EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
+
+static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
+ int id, struct drm_dp_payload *payload)
+{
+ u8 payload_alloc[3], status;
+ int ret;
+ int retries = 0;
+
+ drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ DP_PAYLOAD_TABLE_UPDATED);
+
+ payload_alloc[0] = id;
+ payload_alloc[1] = payload->start_slot;
+ payload_alloc[2] = payload->num_slots;
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
+ if (ret != 3) {
+ DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
+ goto fail;
+ }
+
+retry:
+ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
+ retries++;
+ if (retries < 20) {
+ usleep_range(10000, 20000);
+ goto retry;
+ }
+ DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ ret = 0;
+fail:
+ return ret;
+}
+
+
+/**
+ * drm_dp_check_act_status() - Check ACT handled status.
+ * @mgr: manager to use
+ *
+ * Check the payload status bits in the DPCD for ACT handled completion.
+ */
+int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ u8 status;
+ int ret;
+ int count = 0;
+
+ do {
+ ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+ goto fail;
+ }
+
+ if (status & DP_PAYLOAD_ACT_HANDLED)
+ break;
+ count++;
+ udelay(100);
+
+ } while (count < 30);
+
+ if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
+ DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
+ ret = -EINVAL;
+ goto fail;
+ }
+ return 0;
+fail:
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_check_act_status);
+
+/**
+ * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
+ * @clock: dot clock for the mode
+ * @bpp: bpp for the mode.
+ *
+ * This uses the formula in the spec to calculate the PBN value for a mode.
+ */
+int drm_dp_calc_pbn_mode(int clock, int bpp)
+{
+ fixed20_12 pix_bw;
+ fixed20_12 fbpp;
+ fixed20_12 result;
+ fixed20_12 margin, tmp;
+ u32 res;
+
+ pix_bw.full = dfixed_const(clock);
+ fbpp.full = dfixed_const(bpp);
+ tmp.full = dfixed_const(8);
+ fbpp.full = dfixed_div(fbpp, tmp);
+
+ result.full = dfixed_mul(pix_bw, fbpp);
+ margin.full = dfixed_const(54);
+ tmp.full = dfixed_const(64);
+ margin.full = dfixed_div(margin, tmp);
+ result.full = dfixed_div(result, margin);
+
+ margin.full = dfixed_const(1006);
+ tmp.full = dfixed_const(1000);
+ margin.full = dfixed_div(margin, tmp);
+ result.full = dfixed_mul(result, margin);
+
+ result.full = dfixed_div(result, tmp);
+ result.full = dfixed_ceil(result);
+ res = dfixed_trunc(result);
+ return res;
+}
+EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+
+static int test_calc_pbn_mode(void)
+{
+ int ret;
+ ret = drm_dp_calc_pbn_mode(154000, 30);
+ if (ret != 689)
+ return -EINVAL;
+ ret = drm_dp_calc_pbn_mode(234000, 30);
+ if (ret != 1047)
+ return -EINVAL;
+ return 0;
+}
+
+/* we want to kick the TX after we've ack the up/down IRQs. */
+static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
+{
+ queue_work(system_long_wq, &mgr->tx_work);
+}
+
+static void drm_dp_mst_dump_mstb(struct seq_file *m,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_port *port;
+ int tabs = mstb->lct;
+ char prefix[10];
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ prefix[i] = '\t';
+ prefix[i] = '\0';
+
+ seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
+ list_for_each_entry(port, &mstb->ports, next) {
+ seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
+ if (port->mstb)
+ drm_dp_mst_dump_mstb(m, port->mstb);
+ }
+}
+
+static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
+ char *buf)
+{
+ int ret;
+ int i;
+ for (i = 0; i < 4; i++) {
+ ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
+ if (ret != 16)
+ break;
+ }
+ if (i == 4)
+ return true;
+ return false;
+}
+
+/**
+ * drm_dp_mst_dump_topology(): dump topology to seq file.
+ * @m: seq_file to dump output to
+ * @mgr: manager to dump current topology for.
+ *
+ * helper to dump MST topology to a seq file for debugfs.
+ */
+void drm_dp_mst_dump_topology(struct seq_file *m,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ int i;
+ struct drm_dp_mst_port *port;
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary)
+ drm_dp_mst_dump_mstb(m, mgr->mst_primary);
+
+ /* dump VCPIs */
+ mutex_unlock(&mgr->lock);
+
+ mutex_lock(&mgr->payload_lock);
+ seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
+
+ for (i = 0; i < mgr->max_payloads; i++) {
+ if (mgr->proposed_vcpis[i]) {
+ port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
+ } else
+ seq_printf(m, "vcpi %d:unsed\n", i);
+ }
+ for (i = 0; i < mgr->max_payloads; i++) {
+ seq_printf(m, "payload %d: %d, %d, %d\n",
+ i,
+ mgr->payloads[i].payload_state,
+ mgr->payloads[i].start_slot,
+ mgr->payloads[i].num_slots);
+ }
+ mutex_unlock(&mgr->payload_lock);
+
+ mutex_lock(&mgr->lock);
+ if (mgr->mst_primary) {
+ u8 buf[64];
+ bool bret;
+ int ret;
+ ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+ seq_printf(m, "dpcd: ");
+ for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+ seq_printf(m, "faux/mst: ");
+ for (i = 0; i < 2; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ seq_printf(m, "mst ctrl: ");
+ for (i = 0; i < 1; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+
+ bret = dump_dp_payload_table(mgr, buf);
+ if (bret) {
+ seq_printf(m, "payload table: ");
+ for (i = 0; i < 63; i++)
+ seq_printf(m, "%02x ", buf[i]);
+ seq_printf(m, "\n");
+ }
+
+ }
+
+ mutex_unlock(&mgr->lock);
+}
+EXPORT_SYMBOL(drm_dp_mst_dump_topology);
+
+static void drm_dp_tx_work(struct work_struct *work)
+{
+ struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
+
+ mutex_lock(&mgr->qlock);
+ if (mgr->tx_down_in_progress)
+ process_single_down_tx_qlock(mgr);
+ mutex_unlock(&mgr->qlock);
+}
+
+/**
+ * drm_dp_mst_topology_mgr_init - initialise a topology manager
+ * @mgr: manager struct to initialise
+ * @dev: device providing this structure - for i2c addition.
+ * @aux: DP helper aux channel to talk to this device
+ * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
+ * @max_payloads: maximum number of payloads this GPU can source
+ * @conn_base_id: the connector object ID the MST device is connected to.
+ *
+ * Return 0 for success, or negative error code on failure
+ */
+int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct device *dev, struct drm_dp_aux *aux,
+ int max_dpcd_transaction_bytes,
+ int max_payloads, int conn_base_id)
+{
+ mutex_init(&mgr->lock);
+ mutex_init(&mgr->qlock);
+ mutex_init(&mgr->payload_lock);
+ INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ INIT_LIST_HEAD(&mgr->tx_msg_downq);
+ INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+ INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
+ init_waitqueue_head(&mgr->tx_waitq);
+ mgr->dev = dev;
+ mgr->aux = aux;
+ mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
+ mgr->max_payloads = max_payloads;
+ mgr->conn_base_id = conn_base_id;
+ mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
+ if (!mgr->payloads)
+ return -ENOMEM;
+ mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
+ if (!mgr->proposed_vcpis)
+ return -ENOMEM;
+ set_bit(0, &mgr->payload_mask);
+ test_calc_pbn_mode();
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
+
+/**
+ * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
+ * @mgr: manager to destroy
+ */
+void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->payload_lock);
+ kfree(mgr->payloads);
+ mgr->payloads = NULL;
+ kfree(mgr->proposed_vcpis);
+ mgr->proposed_vcpis = NULL;
+ mutex_unlock(&mgr->payload_lock);
+ mgr->dev = NULL;
+ mgr->aux = NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
+
+/* I2C device */
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+ int num)
+{
+ struct drm_dp_aux *aux = adapter->algo_data;
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+ unsigned int i;
+ bool reading = false;
+ struct drm_dp_sideband_msg_req_body msg;
+ struct drm_dp_sideband_msg_tx *txmsg = NULL;
+ int ret;
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EREMOTEIO;
+
+ /* construct i2c msg */
+ /* see if last msg is a read */
+ if (msgs[num - 1].flags & I2C_M_RD)
+ reading = true;
+
+ if (!reading) {
+ DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ msg.req_type = DP_REMOTE_I2C_READ;
+ msg.u.i2c_read.num_transactions = num - 1;
+ msg.u.i2c_read.port_number = port->port_num;
+ for (i = 0; i < num - 1; i++) {
+ msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
+ msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
+ msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+ }
+ msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
+ msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ txmsg->dst = mstb;
+ drm_dp_encode_sideband_req(&msg, txmsg);
+
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret > 0) {
+
+ if (txmsg->reply.reply_type == 1) { /* got a NAK back */
+ ret = -EREMOTEIO;
+ goto out;
+ }
+ if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
+ ret = -EIO;
+ goto out;
+ }
+ memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
+ ret = num;
+ }
+out:
+ kfree(txmsg);
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
+}
+
+static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+ I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
+ .functionality = drm_dp_mst_i2c_functionality,
+ .master_xfer = drm_dp_mst_i2c_xfer,
+};
+
+/**
+ * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
+{
+ aux->ddc.algo = &drm_dp_mst_i2c_algo;
+ aux->ddc.algo_data = aux;
+ aux->ddc.retries = 3;
+
+ aux->ddc.class = I2C_CLASS_DDC;
+ aux->ddc.owner = THIS_MODULE;
+ aux->ddc.dev.parent = aux->dev;
+ aux->ddc.dev.of_node = aux->dev->of_node;
+
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+ sizeof(aux->ddc.name));
+
+ return i2c_add_adapter(&aux->ddc);
+}
+
+/**
+ * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
+ * @aux: DisplayPort AUX channel
+ */
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
+{
+ i2c_del_adapter(&aux->ddc);
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8218078b6133..3242e208c0d0 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -1,31 +1,11 @@
-/**
- * \file drm_drv.c
- * Generic driver template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * To use this template, you must at least define the following (samples
- * given for the MGA driver):
- *
- * \code
- * #define DRIVER_AUTHOR "VA Linux Systems, Inc."
- *
- * #define DRIVER_NAME "mga"
- * #define DRIVER_DESC "Matrox G200/G400"
- * #define DRIVER_DATE "20001127"
- *
- * #define drm_x mga_##x
- * \endcode
- */
-
/*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -40,432 +20,906 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mount.h>
#include <linux/slab.h>
-#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_legacy.h"
+unsigned int drm_debug = 0; /* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
-
-/** Ioctl table */
-static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
-#if __OS_HAS_AGP
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-#endif
-
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
-
- DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-};
+unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-/** File operations structure */
-static const struct file_operations drm_stub_fops = {
- .owner = THIS_MODULE,
- .open = drm_stub_open,
- .llseek = noop_llseek,
-};
+/*
+ * Default to use monotonic timestamps for wait-for-vblank and page-flip
+ * complete events.
+ */
+unsigned int drm_timestamp_monotonic = 1;
-static int __init drm_core_init(void)
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
+MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
+
+module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
+module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
+
+static DEFINE_SPINLOCK(drm_minor_lock);
+static struct idr drm_minors_idr;
+
+struct class *drm_class;
+static struct dentry *drm_debugfs_root;
+
+int drm_err(const char *func, const char *format, ...)
{
- int ret = -ENOMEM;
+ struct va_format vaf;
+ va_list args;
+ int r;
- drm_global_init();
- drm_connector_ida_init();
- idr_init(&drm_minors_idr);
+ va_start(args, format);
- if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
- goto err_p1;
+ vaf.fmt = format;
+ vaf.va = &args;
- drm_class = drm_sysfs_create(THIS_MODULE, "drm");
- if (IS_ERR(drm_class)) {
- printk(KERN_ERR "DRM: Error creating drm class.\n");
- ret = PTR_ERR(drm_class);
- goto err_p2;
+ r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(drm_err);
+
+void drm_ut_debug_printk(const char *function_name, const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
+
+struct drm_master *drm_master_create(struct drm_minor *minor)
+{
+ struct drm_master *master;
+
+ master = kzalloc(sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return NULL;
+
+ kref_init(&master->refcount);
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+ if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
+ kfree(master);
+ return NULL;
}
+ INIT_LIST_HEAD(&master->magicfree);
+ master->minor = minor;
- drm_debugfs_root = debugfs_create_dir("dri", NULL);
- if (!drm_debugfs_root) {
- DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
- ret = -1;
- goto err_p3;
+ return master;
+}
+
+struct drm_master *drm_master_get(struct drm_master *master)
+{
+ kref_get(&master->refcount);
+ return master;
+}
+EXPORT_SYMBOL(drm_master_get);
+
+static void drm_master_destroy(struct kref *kref)
+{
+ struct drm_master *master = container_of(kref, struct drm_master, refcount);
+ struct drm_magic_entry *pt, *next;
+ struct drm_device *dev = master->minor->dev;
+ struct drm_map_list *r_list, *list_temp;
+
+ mutex_lock(&dev->struct_mutex);
+ if (dev->driver->master_destroy)
+ dev->driver->master_destroy(dev, master);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
+ if (r_list->master == master) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
}
- DRM_INFO("Initialized %s %d.%d.%d %s\n",
- CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
- return 0;
-err_p3:
- drm_sysfs_destroy();
-err_p2:
- unregister_chrdev(DRM_MAJOR, "drm");
+ if (master->unique) {
+ kfree(master->unique);
+ master->unique = NULL;
+ master->unique_len = 0;
+ }
- idr_destroy(&drm_minors_idr);
-err_p1:
- return ret;
+ list_for_each_entry_safe(pt, next, &master->magicfree, head) {
+ list_del(&pt->head);
+ drm_ht_remove_item(&master->magiclist, &pt->hash_item);
+ kfree(pt);
+ }
+
+ drm_ht_remove(&master->magiclist);
+
+ mutex_unlock(&dev->struct_mutex);
+ kfree(master);
}
-static void __exit drm_core_exit(void)
+void drm_master_put(struct drm_master **master)
{
- debugfs_remove(drm_debugfs_root);
- drm_sysfs_destroy();
+ kref_put(&(*master)->refcount, drm_master_destroy);
+ *master = NULL;
+}
+EXPORT_SYMBOL(drm_master_put);
- unregister_chrdev(DRM_MAJOR, "drm");
+int drm_setmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = 0;
- drm_connector_ida_destroy();
- idr_destroy(&drm_minors_idr);
+ mutex_lock(&dev->master_mutex);
+ if (file_priv->is_master)
+ goto out_unlock;
+
+ if (file_priv->minor->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!file_priv->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
+ if (dev->driver->master_set) {
+ ret = dev->driver->master_set(dev, file_priv, false);
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
+ drm_master_put(&file_priv->minor->master);
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
}
-module_init(drm_core_init);
-module_exit(drm_core_exit);
+int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ int ret = -EINVAL;
-/**
- * Copy and IOCTL return string to user space
+ mutex_lock(&dev->master_mutex);
+ if (!file_priv->is_master)
+ goto out_unlock;
+
+ if (!file_priv->minor->master)
+ goto out_unlock;
+
+ ret = 0;
+ if (dev->driver->master_drop)
+ dev->driver->master_drop(dev, file_priv, false);
+ drm_master_put(&file_priv->minor->master);
+ file_priv->is_master = 0;
+
+out_unlock:
+ mutex_unlock(&dev->master_mutex);
+ return ret;
+}
+
+/*
+ * DRM Minors
+ * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
+ * of them is represented by a drm_minor object. Depending on the capabilities
+ * of the device-driver, different interfaces are registered.
+ *
+ * Minors can be accessed via dev->$minor_name. This pointer is either
+ * NULL or a valid drm_minor pointer and stays valid as long as the device is
+ * valid. This means, DRM minors have the same life-time as the underlying
+ * device. However, this doesn't mean that the minor is active. Minors are
+ * registered and unregistered dynamically according to device-state.
*/
-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+
+static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
+ unsigned int type)
{
- int len;
+ switch (type) {
+ case DRM_MINOR_LEGACY:
+ return &dev->primary;
+ case DRM_MINOR_RENDER:
+ return &dev->render;
+ case DRM_MINOR_CONTROL:
+ return &dev->control;
+ default:
+ return NULL;
+ }
+}
- /* don't overflow userbuf */
- len = strlen(value);
- if (len > *buf_len)
- len = *buf_len;
+static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+ int r;
+
+ minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ if (!minor)
+ return -ENOMEM;
+
+ minor->type = type;
+ minor->dev = dev;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ r = idr_alloc(&drm_minors_idr,
+ NULL,
+ 64 * type,
+ 64 * (type + 1),
+ GFP_NOWAIT);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+ idr_preload_end();
+
+ if (r < 0)
+ goto err_free;
+
+ minor->index = r;
+
+ minor->kdev = drm_sysfs_minor_alloc(minor);
+ if (IS_ERR(minor->kdev)) {
+ r = PTR_ERR(minor->kdev);
+ goto err_index;
+ }
- /* let userspace know exact length of driver value (which could be
- * larger than the userspace-supplied buffer) */
- *buf_len = strlen(value);
+ *drm_minor_get_slot(dev, type) = minor;
+ return 0;
+
+err_index:
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+err_free:
+ kfree(minor);
+ return r;
+}
+
+static void drm_minor_free(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor **slot, *minor;
+ unsigned long flags;
+
+ slot = drm_minor_get_slot(dev, type);
+ minor = *slot;
+ if (!minor)
+ return;
+
+ drm_mode_group_destroy(&minor->mode_group);
+ put_device(minor->kdev);
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ kfree(minor);
+ *slot = NULL;
+}
- /* finally, try filling in the userbuf */
- if (len && buf)
- if (copy_to_user(buf, value, len))
- return -EFAULT;
+static int drm_minor_register(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor)
+ return 0;
+
+ ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
+ if (ret) {
+ DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+ return ret;
+ }
+
+ ret = device_add(minor->kdev);
+ if (ret)
+ goto err_debugfs;
+
+ /* replace NULL with @minor so lookups will succeed from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, minor, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ DRM_DEBUG("new minor registered %d\n", minor->index);
return 0;
+
+err_debugfs:
+ drm_debugfs_cleanup(minor);
+ return ret;
+}
+
+static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
+{
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ minor = *drm_minor_get_slot(dev, type);
+ if (!minor || !device_is_registered(minor->kdev))
+ return;
+
+ /* replace @minor with NULL so lookups will fail from now on */
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_replace(&drm_minors_idr, NULL, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ device_del(minor->kdev);
+ dev_set_drvdata(minor->kdev, NULL); /* safety belt */
+ drm_debugfs_cleanup(minor);
}
/**
- * Get version information
+ * drm_minor_acquire - Acquire a DRM minor
+ * @minor_id: Minor ID of the DRM-minor
+ *
+ * Looks up the given minor-ID and returns the respective DRM-minor object. The
+ * reference-count of the underlying device is increased, so you must release this
+ * object with drm_minor_release().
*
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument, pointing to a drm_version structure.
- * \return zero on success or negative number on failure.
+ * As long as you hold this minor, it is guaranteed that the object and the
+ * minor->dev pointer will stay valid! However, the device may get unplugged and
+ * unregistered while you hold the minor.
*
- * Fills in the version information in \p arg.
+ * Returns:
+ * Pointer to minor-object with increased device-refcount, or PTR_ERR on
+ * failure.
*/
-static int drm_version(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
- struct drm_version *version = data;
- int err;
+ struct drm_minor *minor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ minor = idr_find(&drm_minors_idr, minor_id);
+ if (minor)
+ drm_dev_ref(minor->dev);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+
+ if (!minor) {
+ return ERR_PTR(-ENODEV);
+ } else if (drm_device_is_unplugged(minor->dev)) {
+ drm_dev_unref(minor->dev);
+ return ERR_PTR(-ENODEV);
+ }
- version->version_major = dev->driver->major;
- version->version_minor = dev->driver->minor;
- version->version_patchlevel = dev->driver->patchlevel;
- err = drm_copy_field(version->name, &version->name_len,
- dev->driver->name);
- if (!err)
- err = drm_copy_field(version->date, &version->date_len,
- dev->driver->date);
- if (!err)
- err = drm_copy_field(version->desc, &version->desc_len,
- dev->driver->desc);
+ return minor;
+}
- return err;
+/**
+ * drm_minor_release - Release DRM minor
+ * @minor: Pointer to DRM minor object
+ *
+ * Release a minor that was previously acquired via drm_minor_acquire().
+ */
+void drm_minor_release(struct drm_minor *minor)
+{
+ drm_dev_unref(minor->dev);
}
/**
- * drm_ioctl_permit - Check ioctl permissions against caller
+ * drm_put_dev - Unregister and release a DRM device
+ * @dev: DRM device
+ *
+ * Called at module unload time or when a PCI device is unplugged.
*
- * @flags: ioctl permission flags.
- * @file_priv: Pointer to struct drm_file identifying the caller.
+ * Use of this function is discouraged. It will eventually go away completely.
+ * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
*
- * Checks whether the caller is allowed to run an ioctl with the
- * indicated permissions. If so, returns zero. Otherwise returns an
- * error code suitable for ioctl return.
+ * Cleans up the DRM device, calling drm_lastclose().
*/
-static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+void drm_put_dev(struct drm_device *dev)
{
- /* ROOT_ONLY is only for CAP_SYS_ADMIN */
- if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
- return -EACCES;
-
- /* AUTH is only for authenticated or render client */
- if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
- !file_priv->authenticated))
- return -EACCES;
-
- /* MASTER is only for master or control clients */
- if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
- !drm_is_control_client(file_priv)))
- return -EACCES;
-
- /* Control clients must be explicitly allowed */
- if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
- drm_is_control_client(file_priv)))
- return -EACCES;
-
- /* Render clients must be explicitly allowed */
- if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
- drm_is_render_client(file_priv)))
- return -EACCES;
+ DRM_DEBUG("\n");
- return 0;
+ if (!dev) {
+ DRM_ERROR("cleanup called no dev\n");
+ return;
+ }
+
+ drm_dev_unregister(dev);
+ drm_dev_unref(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
+
+void drm_unplug_dev(struct drm_device *dev)
+{
+ /* for a USB device */
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+
+ mutex_lock(&drm_global_mutex);
+
+ drm_device_set_unplugged(dev);
+
+ if (dev->open_count == 0) {
+ drm_put_dev(dev);
+ }
+ mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
+
+/*
+ * DRM internal mount
+ * We want to be able to allocate our own "struct address_space" to control
+ * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
+ * stand-alone address_space objects, so we need an underlying inode. As there
+ * is no way to allocate an independent inode easily, we need a fake internal
+ * VFS mount-point.
+ *
+ * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
+ * frees it again. You are allowed to use iget() and iput() to get references to
+ * the inode. But each drm_fs_inode_new() call must be paired with exactly one
+ * drm_fs_inode_free() call (which does not have to be the last iput()).
+ * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
+ * between multiple inode-users. You could, technically, call
+ * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
+ * iput(), but this way you'd end up with a new vfsmount for each inode.
+ */
+
+static int drm_fs_cnt;
+static struct vfsmount *drm_fs_mnt;
+
+static const struct dentry_operations drm_fs_dops = {
+ .d_dname = simple_dname,
+};
+
+static const struct super_operations drm_fs_sops = {
+ .statfs = simple_statfs,
+};
+
+static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_pseudo(fs_type,
+ "drm:",
+ &drm_fs_sops,
+ &drm_fs_dops,
+ 0x010203ff);
+}
+
+static struct file_system_type drm_fs_type = {
+ .name = "drm",
+ .owner = THIS_MODULE,
+ .mount = drm_fs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static struct inode *drm_fs_inode_new(void)
+{
+ struct inode *inode;
+ int r;
+
+ r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
+ if (r < 0) {
+ DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
+ return ERR_PTR(r);
+ }
+
+ inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
+ if (IS_ERR(inode))
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+
+ return inode;
+}
+
+static void drm_fs_inode_free(struct inode *inode)
+{
+ if (inode) {
+ iput(inode);
+ simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
+ }
}
/**
- * Called whenever a process performs an ioctl on /dev/drm.
+ * drm_dev_alloc - Allocate new DRM device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
*
- * \param inode device inode.
- * \param file_priv DRM file private.
- * \param cmd command.
- * \param arg user argument.
- * \return zero on success or negative number on failure.
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
*
- * Looks up the ioctl function in the ::ioctls table, checking for root
- * previleges if so required, and dispatches to the respective function.
+ * The initial ref-count of the object is 1. Use drm_dev_ref() and
+ * drm_dev_unref() to take and drop further ref-counts.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
*/
-long drm_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+ struct device *parent)
{
- struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
- const struct drm_ioctl_desc *ioctl = NULL;
- drm_ioctl_t *func;
- unsigned int nr = DRM_IOCTL_NR(cmd);
- int retcode = -EINVAL;
- char stack_kdata[128];
- char *kdata = NULL;
- unsigned int usize, asize;
-
- dev = file_priv->minor->dev;
-
- if (drm_device_is_unplugged(dev))
- return -ENODEV;
-
- if ((nr >= DRM_CORE_IOCTL_COUNT) &&
- ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
- goto err_i1;
- if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
- (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- u32 drv_size;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
- drv_size = _IOC_SIZE(ioctl->cmd_drv);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
- cmd = ioctl->cmd_drv;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ kref_init(&dev->ref);
+ dev->dev = parent;
+ dev->driver = driver;
+
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ INIT_LIST_HEAD(&dev->vblank_event_list);
+
+ spin_lock_init(&dev->buf_lock);
+ spin_lock_init(&dev->event_lock);
+ mutex_init(&dev->struct_mutex);
+ mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->master_mutex);
+
+ dev->anon_inode = drm_fs_inode_new();
+ if (IS_ERR(dev->anon_inode)) {
+ ret = PTR_ERR(dev->anon_inode);
+ DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
+ goto err_free;
}
- else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
- u32 drv_size;
-
- ioctl = &drm_ioctls[nr];
- drv_size = _IOC_SIZE(ioctl->cmd);
- usize = asize = _IOC_SIZE(cmd);
- if (drv_size > asize)
- asize = drv_size;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
+ }
- cmd = ioctl->cmd;
- } else
- goto err_i1;
+ if (drm_core_check_feature(dev, DRIVER_RENDER)) {
+ ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
+ }
- DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, ioctl->name);
+ ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
- /* Do not trust userspace, use our own definition */
- func = ioctl->func;
+ if (drm_ht_create(&dev->map_hash, 12))
+ goto err_minors;
- if (unlikely(!func)) {
- DRM_DEBUG("no function\n");
- retcode = -EINVAL;
- goto err_i1;
+ ret = drm_legacy_ctxbitmap_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ goto err_ht;
}
- retcode = drm_ioctl_permit(ioctl->flags, file_priv);
- if (unlikely(retcode))
- goto err_i1;
-
- if (cmd & (IOC_IN | IOC_OUT)) {
- if (asize <= sizeof(stack_kdata)) {
- kdata = stack_kdata;
- } else {
- kdata = kmalloc(asize, GFP_KERNEL);
- if (!kdata) {
- retcode = -ENOMEM;
- goto err_i1;
- }
+ if (driver->driver_features & DRIVER_GEM) {
+ ret = drm_gem_init(dev);
+ if (ret) {
+ DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+ goto err_ctxbitmap;
}
- if (asize > usize)
- memset(kdata + usize, 0, asize - usize);
}
- if (cmd & IOC_IN) {
- if (copy_from_user(kdata, (void __user *)arg,
- usize) != 0) {
- retcode = -EFAULT;
- goto err_i1;
- }
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
- }
+ return dev;
+
+err_ctxbitmap:
+ drm_legacy_ctxbitmap_cleanup(dev);
+err_ht:
+ drm_ht_remove(&dev->map_hash);
+err_minors:
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+ drm_fs_inode_free(dev->anon_inode);
+err_free:
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+static void drm_dev_release(struct kref *ref)
+{
+ struct drm_device *dev = container_of(ref, struct drm_device, ref);
+
+ if (dev->driver->driver_features & DRIVER_GEM)
+ drm_gem_destroy(dev);
+
+ drm_legacy_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_fs_inode_free(dev->anon_inode);
+
+ drm_minor_free(dev, DRM_MINOR_LEGACY);
+ drm_minor_free(dev, DRM_MINOR_RENDER);
+ drm_minor_free(dev, DRM_MINOR_CONTROL);
+
+ mutex_destroy(&dev->master_mutex);
+ kfree(dev->unique);
+ kfree(dev);
+}
- if (ioctl->flags & DRM_UNLOCKED)
- retcode = func(dev, kdata, file_priv);
- else {
- mutex_lock(&drm_global_mutex);
- retcode = func(dev, kdata, file_priv);
- mutex_unlock(&drm_global_mutex);
+/**
+ * drm_dev_ref - Take reference of a DRM device
+ * @dev: device to take reference of or NULL
+ *
+ * This increases the ref-count of @dev by one. You *must* already own a
+ * reference when calling this. Use drm_dev_unref() to drop this reference
+ * again.
+ *
+ * This function never fails. However, it does not provide *any* guarantee
+ * that the device is alive or running. It only provides a
+ * reference to the object and the memory associated with it.
+ */
+void drm_dev_ref(struct drm_device *dev)
+{
+ if (dev)
+ kref_get(&dev->ref);
+}
+EXPORT_SYMBOL(drm_dev_ref);
+
+/**
+ * drm_dev_unref - Drop reference of a DRM device
+ * @dev: device to drop reference of or NULL
+ *
+ * This decreases the ref-count of @dev by one. The device is destroyed if the
+ * ref-count drops to zero.
+ */
+void drm_dev_unref(struct drm_device *dev)
+{
+ if (dev)
+ kref_put(&dev->ref, drm_dev_release);
+}
+EXPORT_SYMBOL(drm_dev_unref);
+
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ * @flags: Flags passed to the driver's .load() function
+ *
+ * Register the DRM device @dev with the system, advertise the device to
+ * user-space and start normal device operation. @dev must have been allocated
+ * via drm_dev_alloc() previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
+{
+ int ret;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_minors;
+
+ ret = drm_minor_register(dev, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_minors;
+
+ ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_minors;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, flags);
+ if (ret)
+ goto err_minors;
}
- if (cmd & IOC_OUT) {
- if (copy_to_user((void __user *)arg, kdata,
- usize) != 0)
- retcode = -EFAULT;
+ /* setup grouping for legacy outputs */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_unload;
}
- err_i1:
- if (!ioctl)
- DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, cmd, nr);
-
- if (kdata != stack_kdata)
- kfree(kdata);
- if (retcode)
- DRM_DEBUG("ret = %d\n", retcode);
- return retcode;
+ ret = 0;
+ goto out_unlock;
+
+err_unload:
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+err_minors:
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
+ return ret;
}
-EXPORT_SYMBOL(drm_ioctl);
+EXPORT_SYMBOL(drm_dev_register);
/**
- * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_unref() to drop their final reference.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ drm_lastclose(dev);
+
+ if (dev->driver->unload)
+ dev->driver->unload(dev);
+
+ if (dev->agp)
+ drm_pci_agp_destroy(dev);
+
+ drm_vblank_cleanup(dev);
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_rmmap(dev, r_list->map);
+
+ drm_minor_unregister(dev, DRM_MINOR_LEGACY);
+ drm_minor_unregister(dev, DRM_MINOR_RENDER);
+ drm_minor_unregister(dev, DRM_MINOR_CONTROL);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
+
+/**
+ * drm_dev_set_unique - Set the unique name of a DRM device
+ * @dev: device of which to set the unique name
+ * @fmt: format string for unique name
+ *
+ * Sets the unique name of a DRM device using the specified format string and
+ * a variable list of arguments. Drivers can use this at driver probe time if
+ * the unique name of the devices they drive is static.
*
- * @nr: Ioctl number.
- * @flags: Where to return the ioctl permission flags
+ * Return: 0 on success or a negative error code on failure.
*/
-bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
{
- if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
- (nr < DRM_COMMAND_BASE)) {
- *flags = drm_ioctls[nr].flags;
- return true;
+ va_list ap;
+
+ kfree(dev->unique);
+
+ va_start(ap, fmt);
+ dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+
+ return dev->unique ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL(drm_dev_set_unique);
+
+/*
+ * DRM Core
+ * The DRM core module initializes all global DRM objects and makes them
+ * available to drivers. Once setup, drivers can probe their respective
+ * devices.
+ * Currently, core management includes:
+ * - The "DRM-Global" key/value database
+ * - Global ID management for connectors
+ * - DRM major number allocation
+ * - DRM minor management
+ * - DRM sysfs class
+ * - DRM debugfs root
+ *
+ * Furthermore, the DRM core provides dynamic char-dev lookups. For each
+ * interface registered on a DRM device, you can request minor numbers from DRM
+ * core. DRM core takes care of major-number management and char-dev
+ * registration. A stub ->open() callback forwards any open() requests to the
+ * registered minor.
+ */
+
+static int drm_stub_open(struct inode *inode, struct file *filp)
+{
+ const struct file_operations *new_fops;
+ struct drm_minor *minor;
+ int err;
+
+ DRM_DEBUG("\n");
+
+ mutex_lock(&drm_global_mutex);
+ minor = drm_minor_acquire(iminor(inode));
+ if (IS_ERR(minor)) {
+ err = PTR_ERR(minor);
+ goto out_unlock;
+ }
+
+ new_fops = fops_get(minor->dev->driver->fops);
+ if (!new_fops) {
+ err = -ENODEV;
+ goto out_release;
}
- return false;
+ replace_fops(filp, new_fops);
+ if (filp->f_op->open)
+ err = filp->f_op->open(inode, filp);
+ else
+ err = 0;
+
+out_release:
+ drm_minor_release(minor);
+out_unlock:
+ mutex_unlock(&drm_global_mutex);
+ return err;
}
-EXPORT_SYMBOL(drm_ioctl_flags);
+
+static const struct file_operations drm_stub_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_stub_open,
+ .llseek = noop_llseek,
+};
+
+static int __init drm_core_init(void)
+{
+ int ret = -ENOMEM;
+
+ drm_global_init();
+ drm_connector_ida_init();
+ idr_init(&drm_minors_idr);
+
+ if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+ goto err_p1;
+
+ drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+ if (IS_ERR(drm_class)) {
+ printk(KERN_ERR "DRM: Error creating drm class.\n");
+ ret = PTR_ERR(drm_class);
+ goto err_p2;
+ }
+
+ drm_debugfs_root = debugfs_create_dir("dri", NULL);
+ if (!drm_debugfs_root) {
+ DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
+ ret = -1;
+ goto err_p3;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s\n",
+ CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+ return 0;
+err_p3:
+ drm_sysfs_destroy();
+err_p2:
+ unregister_chrdev(DRM_MAJOR, "drm");
+
+ idr_destroy(&drm_minors_idr);
+err_p1:
+ return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+ debugfs_remove(drm_debugfs_root);
+ drm_sysfs_destroy();
+
+ unregister_chrdev(DRM_MAJOR, "drm");
+
+ drm_connector_ida_destroy();
+ idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
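
The kernel-doc added above spells out the new device lifecycle: drm_dev_alloc() creates the device with an initial ref-count of 1, drm_dev_register() advertises it (and calls the driver's .load()), drm_dev_unregister() reverses registration without freeing, and the final drm_dev_unref() releases the memory. A minimal sketch of the driver-side flow this implies follows; the platform-driver glue and the foo_* names are hypothetical, only the drm_dev_*() calls come from this patch.

#include <linux/platform_device.h>
#include <drm/drmP.h>

static struct drm_driver foo_drm_driver;	/* hypothetical driver */

static int foo_probe(struct platform_device *pdev)
{
	struct drm_device *ddev;
	int ret;

	/* Allocate and initialize; no registration yet, ref-count is 1. */
	ddev = drm_dev_alloc(&foo_drm_driver, &pdev->dev);
	if (!ddev)
		return -ENOMEM;

	/* Advertise to user space; this also calls foo_drm_driver.load(). */
	ret = drm_dev_register(ddev, 0);
	if (ret) {
		drm_dev_unref(ddev);	/* drops the final reference */
		return ret;
	}

	platform_set_drvdata(pdev, ddev);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	struct drm_device *ddev = platform_get_drvdata(pdev);

	/* Reverse of drm_dev_register(); does not free the device... */
	drm_dev_unregister(ddev);
	/* ...the final drm_dev_unref() does. */
	drm_dev_unref(ddev);
	return 0;
}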
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index dfa9769b26b5..1dbf3bc4c6a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3305,6 +3305,7 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
@@ -3775,8 +3776,14 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
- /* Populate picture aspect ratio from CEA mode list */
- if (frame->video_code > 0)
+ /*
+ * Populate picture aspect ratio from either
+ * user input (if specified) or from the CEA mode list.
+ */
+ if (mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_4_3 ||
+ mode->picture_aspect_ratio == HDMI_PICTURE_ASPECT_16_9)
+ frame->picture_aspect = mode->picture_aspect_ratio;
+ else if (frame->video_code > 0)
frame->picture_aspect = drm_get_cea_aspect_ratio(
frame->video_code);
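
The new precedence here is: an aspect ratio carried on the mode itself wins over the CEA table lookup keyed by the VIC. A brief hedged illustration; drm_hdmi_avi_infoframe_from_display_mode() and the HDMI_PICTURE_ASPECT_* values are real, while foo_fill_avi() and the explicit 16:9 assignment are hypothetical calling context.

#include <drm/drm_edid.h>

static void foo_fill_avi(struct hdmi_avi_infoframe *frame,
			 struct drm_display_mode *mode)
{
	/* Hypothetical: an explicit 16:9 aspect was attached to the mode. */
	mode->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;

	drm_hdmi_avi_infoframe_from_display_mode(frame, mode);
	/*
	 * frame->picture_aspect is now HDMI_PICTURE_ASPECT_16_9, regardless
	 * of what the CEA mode list maps frame->video_code to.
	 */
}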
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index f27c883be391..cc0ae047ed3b 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -327,7 +327,7 @@ err_drm_gem_cma_free_object:
return ret;
}
-static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
+static const struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
.fb_probe = drm_fbdev_cma_create,
};
@@ -354,9 +354,10 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- fbdev_cma->fb_helper.funcs = &drm_fb_cma_helper_funcs;
helper = &fbdev_cma->fb_helper;
+ drm_fb_helper_prepare(dev, helper, &drm_fb_cma_helper_funcs);
+
ret = drm_fb_helper_init(dev, helper, num_crtc, max_conn_count);
if (ret < 0) {
dev_err(dev->dev, "Failed to initialize drm fb helper.\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d5d8cea1a679..3144db9dc0f1 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -49,10 +49,11 @@ static LIST_HEAD(kernel_fb_helper_list);
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
- * Initialization is done as a three-step process with drm_fb_helper_init(),
- * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
- * Drivers with fancier requirements than the default behaviour can override the
- * second step with their own code. Teardown is done with drm_fb_helper_fini().
+ * Initialization is done as a four-step process with drm_fb_helper_prepare(),
+ * drm_fb_helper_init(), drm_fb_helper_single_add_all_connectors() and
+ * drm_fb_helper_initial_config(). Drivers with fancier requirements than the
+ * default behaviour can override the third step with their own code.
+ * Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
* drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
@@ -63,6 +64,19 @@ static LIST_HEAD(kernel_fb_helper_list);
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
+ *
+ * It is possible, though perhaps somewhat tricky, to implement race-free
+ * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare()
+ * helper must be called first to initialize the minimum required to make
+ * hotplug detection work. Drivers also need to make sure to properly set up
+ * the dev->mode_config.funcs member. After calling drm_kms_helper_poll_init()
+ * it is safe to enable interrupts and start processing hotplug events. At the
+ * same time, drivers should initialize all modeset objects such as CRTCs,
+ * encoders and connectors. To finish up the fbdev helper initialization, the
+ * drm_fb_helper_init() function is called. To probe for all attached displays
+ * and set up an initial configuration using the detected hardware, drivers
+ * should call drm_fb_helper_single_add_all_connectors() followed by
+ * drm_fb_helper_initial_config().
*/
/**
@@ -105,6 +119,58 @@ fail:
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
+int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector **temp;
+ struct drm_fb_helper_connector *fb_helper_connector;
+
+ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+ if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
+ temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ fb_helper->connector_info_alloc_count = fb_helper->connector_count + 1;
+ fb_helper->connector_info = temp;
+ }
+
+
+ fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
+ if (!fb_helper_connector)
+ return -ENOMEM;
+
+ fb_helper_connector->connector = connector;
+ fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+
+int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
+ struct drm_connector *connector)
+{
+ struct drm_fb_helper_connector *fb_helper_connector;
+ int i, j;
+
+ WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ if (fb_helper->connector_info[i]->connector == connector)
+ break;
+ }
+
+ if (i == fb_helper->connector_count)
+ return -EINVAL;
+ fb_helper_connector = fb_helper->connector_info[i];
+
+ for (j = i + 1; j < fb_helper->connector_count; j++) {
+ fb_helper->connector_info[j - 1] = fb_helper->connector_info[j];
+ }
+ fb_helper->connector_count--;
+ kfree(fb_helper_connector);
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
+
static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
struct drm_fb_helper_connector *fb_helper_conn;
@@ -199,9 +265,6 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
struct drm_crtc_helper_funcs *funcs;
int i;
- if (list_empty(&kernel_fb_helper_list))
- return false;
-
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
for (i = 0; i < helper->crtc_count; i++) {
struct drm_mode_set *mode_set =
@@ -531,6 +594,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
}
/**
+ * drm_fb_helper_prepare - setup a drm_fb_helper structure
+ * @dev: DRM device
+ * @helper: driver-allocated fbdev helper structure to set up
+ * @funcs: pointer to structure of functions associated with this helper
+ *
+ * Sets up the bare minimum to make the framebuffer helper usable. This is
+ * useful to implement race-free initialization of the polling helpers.
+ */
+void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ const struct drm_fb_helper_funcs *funcs)
+{
+ INIT_LIST_HEAD(&helper->kernel_fb_list);
+ helper->funcs = funcs;
+ helper->dev = dev;
+}
+EXPORT_SYMBOL(drm_fb_helper_prepare);
+
+/**
* drm_fb_helper_init - initialize a drm_fb_helper structure
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
@@ -542,8 +623,7 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
* nor register the fbdev. This is only done in drm_fb_helper_initial_config()
* to allow driver writers more control over the exact init sequence.
*
- * Drivers must set fb_helper->funcs before calling
- * drm_fb_helper_initial_config().
+ * Drivers must call drm_fb_helper_prepare() before calling this function.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
@@ -558,10 +638,6 @@ int drm_fb_helper_init(struct drm_device *dev,
if (!max_conn_count)
return -EINVAL;
- fb_helper->dev = dev;
-
- INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
-
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!fb_helper->crtc_info)
return -ENOMEM;
@@ -572,6 +648,7 @@ int drm_fb_helper_init(struct drm_device *dev,
kfree(fb_helper->crtc_info);
return -ENOMEM;
}
+ fb_helper->connector_info_alloc_count = dev->mode_config.num_connector;
fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
@@ -1056,7 +1133,6 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
- info->fix.type_aux = 0;
info->fix.line_length = pitch;
return;
@@ -1613,8 +1689,10 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
* either the output polling work or a work item launched from the driver's
* hotplug interrupt).
*
- * Note that the driver must ensure that this is only called _after_ the fb has
- * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
+ * Note that drivers may call this even before calling
+ * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows
+ * for a race-free fbcon setup and will make sure that the fbdev emulation will
+ * not miss any hotplug events.
*
* RETURNS:
* 0 on success and a non-zero error code otherwise.
@@ -1624,11 +1702,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
u32 max_width, max_height;
- if (!fb_helper->fb)
- return 0;
-
mutex_lock(&fb_helper->dev->mode_config.mutex);
- if (!drm_fb_helper_is_bound(fb_helper)) {
+ if (!fb_helper->fb || !drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&fb_helper->dev->mode_config.mutex);
return 0;
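
The updated documentation above describes a four-step setup. A condensed sketch of that sequence, under the assumption of a hypothetical driver (foo) that creates its modeset objects and enables hotplug processing between steps 1 and 2; all drm_fb_helper_*() calls are the ones documented in this patch.

static int foo_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper,
			  const struct drm_fb_helper_funcs *funcs)
{
	int ret;

	/* Step 1: the bare minimum so hotplug events can be handled safely. */
	drm_fb_helper_prepare(dev, helper, funcs);

	/*
	 * ...register CRTCs/encoders/connectors, set up
	 * dev->mode_config.funcs, call drm_kms_helper_poll_init(dev) and
	 * enable interrupts here...
	 */

	/* Step 2: allocate per-CRTC/per-connector helper state. */
	ret = drm_fb_helper_init(dev, helper, dev->mode_config.num_crtc,
				 dev->mode_config.num_connector);
	if (ret)
		return ret;

	/* Steps 3 and 4: scan connectors and set an initial configuration. */
	drm_fb_helper_single_add_all_connectors(helper);
	drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);

	return 0;
}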
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 021fe5d11df5..79d5221c6e41 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -38,6 +38,7 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include "drm_legacy.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
@@ -112,55 +113,12 @@ err_undo:
EXPORT_SYMBOL(drm_open);
/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, call the \c open method, and restore the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
- struct drm_device *dev;
- struct drm_minor *minor;
- int err = -ENODEV;
- const struct file_operations *new_fops;
-
- DRM_DEBUG("\n");
-
- mutex_lock(&drm_global_mutex);
- minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor))
- goto out_unlock;
-
- dev = minor->dev;
- new_fops = fops_get(dev->driver->fops);
- if (!new_fops)
- goto out_release;
-
- replace_fops(filp, new_fops);
- if (filp->f_op->open)
- err = filp->f_op->open(inode, filp);
-
-out_release:
- drm_minor_release(minor);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
- return err;
-}
-
-/**
* Check whether DRI will run on this CPU.
*
* \return non-zero if the DRI will run on this CPU, or zero otherwise.
*/
static int drm_cpu_valid(void)
{
-#if defined(__i386__)
- if (boot_cpu_data.x86 == 3)
- return 0; /* No cmpxchg on a 386 */
-#endif
#if defined(__sparc__) && !defined(__sparc_v9__)
return 0; /* No cmpxchg before v9 sparc. */
#endif
@@ -203,8 +161,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
priv->minor = minor;
/* for compatibility root is always authenticated */
- priv->always_authenticated = capable(CAP_SYS_ADMIN);
- priv->authenticated = priv->always_authenticated;
+ priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
INIT_LIST_HEAD(&priv->lhead);
@@ -429,6 +386,10 @@ int drm_release(struct inode *inode, struct file *filp)
DRM_DEBUG("open_count = %d\n", dev->open_count);
+ mutex_lock(&dev->struct_mutex);
+ list_del(&file_priv->lhead);
+ mutex_unlock(&dev->struct_mutex);
+
if (dev->driver->preclose)
dev->driver->preclose(dev, file_priv);
@@ -461,44 +422,18 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
+ drm_legacy_ctxbitmap_flush(dev, file_priv);
mutex_lock(&dev->master_mutex);
if (file_priv->is_master) {
struct drm_master *master = file_priv->master;
- struct drm_file *temp;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(temp, &dev->filelist, lhead) {
- if ((temp->master == file_priv->master) &&
- (temp != file_priv))
- temp->authenticated = temp->always_authenticated;
- }
/**
* Since the master is disappearing, so is the
* possibility to lock.
*/
-
+ mutex_lock(&dev->struct_mutex);
if (master->lock.hw_lock) {
if (dev->sigdata.lock == master->lock.hw_lock)
dev->sigdata.lock = NULL;
@@ -522,10 +457,6 @@ int drm_release(struct inode *inode, struct file *filp)
file_priv->is_master = 0;
mutex_unlock(&dev->master_mutex);
- mutex_lock(&dev->struct_mutex);
- list_del(&file_priv->lhead);
- mutex_unlock(&dev->struct_mutex);
-
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d71190aad5..6adee4c2afc0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
* @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
*/
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
- struct inode *inode;
struct address_space *mapping;
struct page *p, **pages;
int i, npages;
/* This is the shared memory object that backs the GEM resource */
- inode = file_inode(obj->filp);
- mapping = inode->i_mapping;
+ mapping = file_inode(obj->filp)->i_mapping;
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
if (pages == NULL)
return ERR_PTR(-ENOMEM);
- gfpmask |= mapping_gfp_mask(mapping);
-
for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
goto fail;
pages[i] = p;
@@ -479,7 +490,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
* __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
* so shmem can relocate pages during swapin if required.
*/
- BUG_ON((gfpmask & __GFP_DMA32) &&
+ BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
(page_to_pfn(p) >= 0x00100000UL));
}
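
The reworked helper drops the gfpmask argument; callers that need a specific zone now set it once on the shmem mapping, as the new kernel-doc explains. A sketch of the adapted call pattern follows; foo_pin_pages() and the __GFP_DMA32 constraint are hypothetical, the drm_gem_*() and mapping_set_gfp_mask() calls are real.

#include <linux/pagemap.h>
#include <drm/drmP.h>

static int foo_pin_pages(struct drm_gem_object *obj)
{
	struct page **pages;

	/* Set zone constraints once, right after object init (see above). */
	mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
			     GFP_USER | __GFP_DMA32);

	/* Previously: drm_gem_get_pages(obj, gfpmask); the mask is gone. */
	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ...use the pinned page array..., then release and unpin it: */
	drm_gem_put_pages(obj, pages, false, false);
	return 0;
}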
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 05c97c5350a1..e467e67af6e7 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -327,7 +327,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, size_t size,
/* Create a CMA GEM buffer. */
cma_obj = __drm_gem_cma_create(dev, size);
if (IS_ERR(cma_obj))
- return ERR_PTR(PTR_ERR(cma_obj));
+ return ERR_CAST(cma_obj);
cma_obj->paddr = sg_dma_address(sgt->sgl);
cma_obj->sgt = sgt;
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 86feedd5e6f6..ecaf0fa2eec8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -132,7 +132,7 @@ int drm_bufs_info(struct seq_file *m, void *data)
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
- atomic_read(&dma->bufs[i].freelist.count),
+ 0,
dma->bufs[i].seg_count,
seg_pages,
seg_pages * PAGE_SIZE / 1024);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 69c61f392e66..40be746b7e68 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -1,11 +1,3 @@
-/**
- * \file drm_ioctl.c
- * IOCTL processing for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
* Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
*
@@ -13,6 +5,9 @@
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
+ * Author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author Gareth Hughes <gareth@valinux.com>
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
@@ -35,6 +30,7 @@
#include <drm/drmP.h>
#include <drm/drm_core.h>
+#include "drm_legacy.h"
#include <linux/pci.h>
#include <linux/export.h>
@@ -42,6 +38,124 @@
#include <asm/mtrr.h>
#endif
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+};
+
+#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE(drm_ioctls)
+
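Driver-private ioctls follow the same pattern through the DRM_IOCTL_DEF_DRV() helper in drmP.h, which additionally records the full ioctl encoding in .cmd_drv so drm_ioctl() below can size its copies. A minimal sketch; the FOO_* names are hypothetical, and their DRM_FOO_* / DRM_IOCTL_FOO_* definitions would live in the driver's uapi header:

	static const struct drm_ioctl_desc foo_ioctls[] = {
		/* the macro indexes entries relative to DRM_COMMAND_BASE */
		DRM_IOCTL_DEF_DRV(FOO_SUBMIT, foo_submit_ioctl,
				  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
		DRM_IOCTL_DEF_DRV(FOO_GET_PARAM, foo_get_param_ioctl,
				  DRM_UNLOCKED | DRM_RENDER_ALLOW),
	};
	/* wired up via foo_driver.ioctls and foo_driver.num_ioctls */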
/**
* Get the bus id.
*
@@ -342,8 +456,6 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->stereo_allowed = req->value;
break;
case DRM_CLIENT_CAP_UNIVERSAL_PLANES:
- if (!drm_universal_planes)
- return -EINVAL;
if (req->value > 1)
return -EINVAL;
file_priv->universal_planes = req->value;
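With the module-parameter gate gone, any client may now opt in to universal planes unconditionally. A minimal userspace sketch using libdrm's drmSetClientCap() wrapper, assuming fd is an open DRM device node:

	#include <stdio.h>
	#include <xf86drm.h>

	/* values other than 0 or 1 fail with -EINVAL, per the check above */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1))
		fprintf(stderr, "universal planes not supported\n");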
@@ -417,3 +529,243 @@ int drm_noop(struct drm_device *dev, void *data,
return 0;
}
EXPORT_SYMBOL(drm_noop);
+
+/**
+ * Copy an ioctl return string to user space
+ */
+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
+{
+ int len;
+
+ /* don't overflow userbuf */
+ len = strlen(value);
+ if (len > *buf_len)
+ len = *buf_len;
+
+ /* let userspace know exact length of driver value (which could be
+ * larger than the userspace-supplied buffer) */
+ *buf_len = strlen(value);
+
+ /* finally, try filling in the userbuf */
+ if (len && buf)
+ if (copy_to_user(buf, value, len))
+ return -EFAULT;
+ return 0;
+}
+
+/**
+ * Get version information
+ *
+ * \param dev DRM device.
+ * \param data user argument, pointing to a drm_version structure.
+ * \param file_priv DRM file private.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p data.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_version *version = data;
+ int err;
+
+ version->version_major = dev->driver->major;
+ version->version_minor = dev->driver->minor;
+ version->version_patchlevel = dev->driver->patchlevel;
+ err = drm_copy_field(version->name, &version->name_len,
+ dev->driver->name);
+ if (!err)
+ err = drm_copy_field(version->date, &version->date_len,
+ dev->driver->date);
+ if (!err)
+ err = drm_copy_field(version->desc, &version->desc_len,
+ dev->driver->desc);
+
+ return err;
+}
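Because drm_copy_field() reports the full string length even when it truncates, userspace can use the classic two-pass pattern against DRM_IOCTL_VERSION. A sketch with error handling elided, assuming fd is an open DRM node:

	struct drm_version v = {0};

	ioctl(fd, DRM_IOCTL_VERSION, &v);	/* pass 1: lengths only */
	v.name = malloc(v.name_len + 1);
	v.date = malloc(v.date_len + 1);
	v.desc = malloc(v.desc_len + 1);
	ioctl(fd, DRM_IOCTL_VERSION, &v);	/* pass 2: strings copied */
	v.name[v.name_len] = '\0';		/* the kernel does not NUL-terminate */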
+
+/**
+ * drm_ioctl_permit - Check ioctl permissions against caller
+ *
+ * @flags: ioctl permission flags.
+ * @file_priv: Pointer to struct drm_file identifying the caller.
+ *
+ * Checks whether the caller is allowed to run an ioctl with the
+ * indicated permissions. If so, returns zero. Otherwise returns an
+ * error code suitable for ioctl return.
+ */
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+{
+ /* ROOT_ONLY is only for CAP_SYS_ADMIN */
+ if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
+ return -EACCES;
+
+ /* AUTH is only for authenticated or render client */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
+
+ /* MASTER is only for master or control clients */
+ if (unlikely((flags & DRM_MASTER) && !file_priv->is_master &&
+ !drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Control clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_CONTROL_ALLOW) &&
+ drm_is_control_client(file_priv)))
+ return -EACCES;
+
+ /* Render clients must be explicitly allowed */
+ if (unlikely(!(flags & DRM_RENDER_ALLOW) &&
+ drm_is_render_client(file_priv)))
+ return -EACCES;
+
+ return 0;
+}
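A worked example of how these checks combine for a render-node client:

	/*
	 * drm_ioctl_permit(DRM_AUTH, render_client)                    -> -EACCES
	 *   (render clients pass the AUTH check implicitly, but the last
	 *    check rejects them unless DRM_RENDER_ALLOW is set)
	 * drm_ioctl_permit(DRM_AUTH | DRM_RENDER_ALLOW, render_client) -> 0
	 * drm_ioctl_permit(DRM_ROOT_ONLY, render_client)               -> -EACCES
	 *   (unless the caller has CAP_SYS_ADMIN)
	 */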
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges if so required, and dispatches to the respective function.
+ */
+long drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ const struct drm_ioctl_desc *ioctl = NULL;
+ drm_ioctl_t *func;
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ int retcode = -EINVAL;
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+
+ dev = file_priv->minor->dev;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+ ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+ goto err_i1;
+ if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+ (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
+ u32 drv_size;
+ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ drv_size = _IOC_SIZE(ioctl->cmd_drv);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+ cmd = ioctl->cmd_drv;
+	} else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+ u32 drv_size;
+
+ ioctl = &drm_ioctls[nr];
+
+ drv_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (drv_size > asize)
+ asize = drv_size;
+
+ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+ DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, ioctl->name);
+
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ DRM_DEBUG("no function\n");
+ retcode = -EINVAL;
+ goto err_i1;
+ }
+
+ retcode = drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(retcode))
+ goto err_i1;
+
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
+
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg,
+ usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ if (ioctl->flags & DRM_UNLOCKED)
+ retcode = func(dev, kdata, file_priv);
+ else {
+ mutex_lock(&drm_global_mutex);
+ retcode = func(dev, kdata, file_priv);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ if (cmd & IOC_OUT) {
+ if (copy_to_user((void __user *)arg, kdata,
+ usize) != 0)
+ retcode = -EFAULT;
+ }
+
+ err_i1:
+ if (!ioctl)
+ DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+ if (retcode)
+ DRM_DEBUG("ret = %d\n", retcode);
+ return retcode;
+}
+EXPORT_SYMBOL(drm_ioctl);
+
+/**
+ * drm_ioctl_flags - Check for core ioctl and return ioctl permission flags
+ *
+ * @nr: Ioctl number.
+ * @flags: Where to return the ioctl permission flags
+ */
+bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
+{
+ if ((nr >= DRM_COMMAND_END && nr < DRM_CORE_IOCTL_COUNT) ||
+ (nr < DRM_COMMAND_BASE)) {
+ *flags = drm_ioctls[nr].flags;
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_ioctl_flags);
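The helper lets code outside drm_ioctl(), such as a driver's 32-bit compat path, consult the core table. A hedged usage sketch:

	unsigned int flags;

	/* only core ioctl numbers below DRM_COMMAND_BASE resolve here */
	if (drm_ioctl_flags(nr, &flags) && (flags & DRM_RENDER_ALLOW))
		return drm_ioctl(filp, cmd, arg);	/* safe via a render node */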
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
new file mode 100644
index 000000000000..d34f20a79b7c
--- /dev/null
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -0,0 +1,51 @@
+#ifndef __DRM_LEGACY_H__
+#define __DRM_LEGACY_H__
+
+/*
+ * Copyright (c) 2014 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+struct drm_device;
+struct drm_file;
+
+/*
+ * Generic DRM Contexts
+ */
+
+#define DRM_KERNEL_CONTEXT 0
+#define DRM_RESERVED_CONTEXTS 1
+
+int drm_legacy_ctxbitmap_init(struct drm_device *dev);
+void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
+void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
+
+int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_switchctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_newctx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
+
+int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
+
+#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index f6452682141b..e26b59e385ff 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -35,6 +35,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
+#include "drm_legacy.h"
static int drm_notifier(void *priv);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index e633df2f68d8..6aa6a9e95570 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -201,16 +201,15 @@ EXPORT_SYMBOL(mipi_dsi_detach);
/**
* mipi_dsi_dcs_write - send DCS write command
* @dsi: DSI device
- * @channel: virtual channel
* @data: pointer to the command followed by parameters
* @len: length of @data
*/
-int mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, unsigned int channel,
- const void *data, size_t len)
+ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.tx_buf = data,
.tx_len = len
};
@@ -239,19 +238,18 @@ EXPORT_SYMBOL(mipi_dsi_dcs_write);
/**
* mipi_dsi_dcs_read - send DCS read request command
* @dsi: DSI device
- * @channel: virtual channel
* @cmd: DCS read command
* @data: pointer to read buffer
* @len: length of @data
*
* Function returns number of read bytes or error code.
*/
-ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, unsigned int channel,
- u8 cmd, void *data, size_t len)
+ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
+ size_t len)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
struct mipi_dsi_msg msg = {
- .channel = channel,
+ .channel = dsi->channel,
.type = MIPI_DSI_DCS_READ,
.tx_buf = &cmd,
.tx_len = 1,
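Call sites shrink accordingly, since the virtual channel now travels with the device. A sketch of a panel driver on the new signature; MIPI_DCS_SET_DISPLAY_ON comes from <video/mipi_display.h> and the function name is illustrative:

	static int panel_display_on(struct mipi_dsi_device *dsi)
	{
		u8 cmd = MIPI_DCS_SET_DISPLAY_ON; /* command byte leads the buffer */
		ssize_t ret = mipi_dsi_dcs_write(dsi, &cmd, sizeof(cmd));

		return ret < 0 ? ret : 0;
	}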
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
new file mode 100644
index 000000000000..16150a00c237
--- /dev/null
+++ b/drivers/gpu/drm/drm_of.c
@@ -0,0 +1,67 @@
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/of_graph.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_of.h>
+
+/**
+ * drm_crtc_port_mask - find the mask of a registered CRTC by port OF node
+ * @dev: DRM device
+ * @port: port OF node
+ *
+ * Given a port OF node, return the possible_crtcs bit of the corresponding
+ * CRTC within the device's list of CRTCs. Returns zero if not found.
+ */
+static uint32_t drm_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
+{
+ unsigned int index = 0;
+ struct drm_crtc *tmp;
+
+ list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
+ if (tmp->port == port)
+ return 1 << index;
+
+ index++;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_of_find_possible_crtcs - find the possible CRTCs for an encoder port
+ * @dev: DRM device
+ * @port: encoder port to scan for endpoints
+ *
+ * Scan all endpoints attached to a port, locate their attached CRTCs,
+ * and generate the DRM mask of CRTCs which may be attached to this
+ * encoder.
+ *
+ * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ */
+uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port)
+{
+ struct device_node *remote_port, *ep = NULL;
+ uint32_t possible_crtcs = 0;
+
+ do {
+ ep = of_graph_get_next_endpoint(port, ep);
+ if (!ep)
+ break;
+
+ remote_port = of_graph_get_remote_port(ep);
+ if (!remote_port) {
+ of_node_put(ep);
+ return 0;
+ }
+
+ possible_crtcs |= drm_crtc_port_mask(dev, remote_port);
+
+ of_node_put(remote_port);
+ } while (1);
+
+ return possible_crtcs;
+}
+EXPORT_SYMBOL(drm_of_find_possible_crtcs);
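The intended consumer is an encoder's bind path, which derives possible_crtcs from its DT port before initializing the encoder. A hedged sketch with illustrative names:

	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
	if (!encoder->possible_crtcs)
		return -EPROBE_DEFER;	/* remote CRTC ports not bound yet */

	drm_encoder_init(drm, encoder, &foo_encoder_funcs,
			 DRM_MODE_ENCODER_TMDS);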
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 6d133149cc74..827ec1a3040b 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -335,9 +335,10 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
}
/* possible_crtc's will be filled in later by crtc_init */
- ret = drm_plane_init(dev, primary, 0, &drm_primary_helper_funcs,
- formats, num_formats,
- DRM_PLANE_TYPE_PRIMARY);
+ ret = drm_universal_plane_init(dev, primary, 0,
+ &drm_primary_helper_funcs,
+ formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
if (ret) {
kfree(primary);
primary = NULL;
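drm_universal_plane_init() mirrors the old call but takes an explicit enum drm_plane_type rather than leaving the type implicit. A driver-created overlay plane would look roughly like this (foo_* names illustrative):

	ret = drm_universal_plane_init(dev, plane, 1 << crtc_index,
				       &foo_plane_funcs, formats,
				       ARRAY_SIZE(formats),
				       DRM_PLANE_TYPE_OVERLAY);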
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d22676b89cbb..db7d250f7ac7 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -130,7 +130,14 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
- count = (*connector_funcs->get_modes)(connector);
+ {
+ if (connector->override_edid) {
+ struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
+
+ count = drm_add_edid_modes(connector, edid);
+ } else
+ count = (*connector_funcs->get_modes)(connector);
+ }
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
diff --git a/drivers/gpu/drm/drm_rect.c b/drivers/gpu/drm/drm_rect.c
index 7047ca025787..631f5afd451c 100644
--- a/drivers/gpu/drm/drm_rect.c
+++ b/drivers/gpu/drm/drm_rect.c
@@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
}
EXPORT_SYMBOL(drm_rect_debug_print);
+
+/**
+ * drm_rect_rotate - Rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation to be applied
+ *
+ * Apply @rotation to the coordinates of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the untransformed coordinate
+ * space.
+ */
+void drm_rect_rotate(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation)
+{
+ struct drm_rect tmp;
+
+ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ tmp = *r;
+
+ if (rotation & BIT(DRM_REFLECT_X)) {
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ }
+
+ if (rotation & BIT(DRM_REFLECT_Y)) {
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ }
+ }
+
+ switch (rotation & 0xf) {
+ case BIT(DRM_ROTATE_0):
+ break;
+ case BIT(DRM_ROTATE_90):
+ tmp = *r;
+ r->x1 = tmp.y1;
+ r->x2 = tmp.y2;
+ r->y1 = width - tmp.x2;
+ r->y2 = width - tmp.x1;
+ break;
+ case BIT(DRM_ROTATE_180):
+ tmp = *r;
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ break;
+ case BIT(DRM_ROTATE_270):
+ tmp = *r;
+ r->x1 = height - tmp.y2;
+ r->x2 = height - tmp.y1;
+ r->y1 = tmp.x1;
+ r->y2 = tmp.x2;
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_rect_rotate);
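A worked example, derived from the DRM_ROTATE_90 case above: a 20x20 rectangle at (10,20) inside a 100x50 coordinate space:

	struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 30, .y2 = 40 };

	drm_rect_rotate(&r, 100, 50, BIT(DRM_ROTATE_90));
	/* r is now { .x1 = 20, .y1 = 70, .x2 = 40, .y2 = 90 },
	 * expressed in the rotated 50x100 coordinate space */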
+
+/**
+ * drm_rect_rotate_inv - Inverse rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation whose inverse is to be applied
+ *
+ * Apply the inverse of @rotation to the coordinates
+ * of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the original untransformed
+ * coordinate space, so that you never have to flip
+ * them when doing a rotation and its inverse.
+ * That is, if you do:
+ *
+ * drm_rect_rotate(&r, width, height, rotation);
+ * drm_rect_rotate_inv(&r, width, height, rotation);
+ *
+ * you will always get back the original rectangle.
+ */
+void drm_rect_rotate_inv(struct drm_rect *r,
+ int width, int height,
+ unsigned int rotation)
+{
+ struct drm_rect tmp;
+
+ switch (rotation & 0xf) {
+ case BIT(DRM_ROTATE_0):
+ break;
+ case BIT(DRM_ROTATE_90):
+ tmp = *r;
+ r->x1 = width - tmp.y2;
+ r->x2 = width - tmp.y1;
+ r->y1 = tmp.x1;
+ r->y2 = tmp.x2;
+ break;
+ case BIT(DRM_ROTATE_180):
+ tmp = *r;
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ break;
+ case BIT(DRM_ROTATE_270):
+ tmp = *r;
+ r->x1 = tmp.y1;
+ r->x2 = tmp.y2;
+ r->y1 = height - tmp.x2;
+ r->y2 = height - tmp.x1;
+ break;
+ default:
+ break;
+ }
+
+ if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+ tmp = *r;
+
+ if (rotation & BIT(DRM_REFLECT_X)) {
+ r->x1 = width - tmp.x2;
+ r->x2 = width - tmp.x1;
+ }
+
+ if (rotation & BIT(DRM_REFLECT_Y)) {
+ r->y1 = height - tmp.y2;
+ r->y2 = height - tmp.y1;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_rect_rotate_inv);
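The round-trip property promised above can be asserted directly with drm_rect_equals() from <drm/drm_rect.h>; a sketch:

	struct drm_rect r = src;

	drm_rect_rotate(&r, width, height, rotation);
	drm_rect_rotate_inv(&r, width, height, rotation);
	WARN_ON(!drm_rect_equals(&r, &src));	/* always passes */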
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
deleted file mode 100644
index 14d16464000a..000000000000
--- a/drivers/gpu/drm/drm_stub.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
- *
- * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Author Rickard E. (Rik) Faith <faith@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mount.h>
-#include <linux/slab.h>
-#include <drm/drmP.h>
-#include <drm/drm_core.h>
-
-unsigned int drm_debug = 0; /* 1 to enable debug output */
-EXPORT_SYMBOL(drm_debug);
-
-unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
-EXPORT_SYMBOL(drm_rnodes);
-
-/* 1 to allow user space to request universal planes (experimental) */
-unsigned int drm_universal_planes = 0;
-EXPORT_SYMBOL(drm_universal_planes);
-
-unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
-EXPORT_SYMBOL(drm_vblank_offdelay);
-
-unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
-EXPORT_SYMBOL(drm_timestamp_precision);
-
-/*
- * Default to use monotonic timestamps for wait-for-vblank and page-flip
- * complete events.
- */
-unsigned int drm_timestamp_monotonic = 1;
-
-MODULE_AUTHOR(CORE_AUTHOR);
-MODULE_DESCRIPTION(CORE_DESC);
-MODULE_LICENSE("GPL and additional rights");
-MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
-MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
-MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
-
-module_param_named(debug, drm_debug, int, 0600);
-module_param_named(rnodes, drm_rnodes, int, 0600);
-module_param_named(universal_planes, drm_universal_planes, int, 0600);
-module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
-module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
-module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
-
-static DEFINE_SPINLOCK(drm_minor_lock);
-struct idr drm_minors_idr;
-
-struct class *drm_class;
-struct dentry *drm_debugfs_root;
-
-int drm_err(const char *func, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
- int r;
-
- va_start(args, format);
-
- vaf.fmt = format;
- vaf.va = &args;
-
- r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
-
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(drm_err);
-
-void drm_ut_debug_printk(const char *function_name, const char *format, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, format);
- vaf.fmt = format;
- vaf.va = &args;
-
- printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(drm_ut_debug_printk);
-
-struct drm_master *drm_master_create(struct drm_minor *minor)
-{
- struct drm_master *master;
-
- master = kzalloc(sizeof(*master), GFP_KERNEL);
- if (!master)
- return NULL;
-
- kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
- if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
- kfree(master);
- return NULL;
- }
- INIT_LIST_HEAD(&master->magicfree);
- master->minor = minor;
-
- return master;
-}
-
-struct drm_master *drm_master_get(struct drm_master *master)
-{
- kref_get(&master->refcount);
- return master;
-}
-EXPORT_SYMBOL(drm_master_get);
-
-static void drm_master_destroy(struct kref *kref)
-{
- struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
- struct drm_device *dev = master->minor->dev;
- struct drm_map_list *r_list, *list_temp;
-
- mutex_lock(&dev->struct_mutex);
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
- if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
- }
- }
-
- if (master->unique) {
- kfree(master->unique);
- master->unique = NULL;
- master->unique_len = 0;
- }
-
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
- drm_ht_remove(&master->magiclist);
-
- mutex_unlock(&dev->struct_mutex);
- kfree(master);
-}
-
-void drm_master_put(struct drm_master **master)
-{
- kref_put(&(*master)->refcount, drm_master_destroy);
- *master = NULL;
-}
-EXPORT_SYMBOL(drm_master_put);
-
-int drm_setmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = 0;
-
- mutex_lock(&dev->master_mutex);
- if (file_priv->is_master)
- goto out_unlock;
-
- if (file_priv->minor->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
- file_priv->minor->master = drm_master_get(file_priv->master);
- file_priv->is_master = 1;
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0)) {
- file_priv->is_master = 0;
- drm_master_put(&file_priv->minor->master);
- }
- }
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int ret = -EINVAL;
-
- mutex_lock(&dev->master_mutex);
- if (!file_priv->is_master)
- goto out_unlock;
-
- if (!file_priv->minor->master)
- goto out_unlock;
-
- ret = 0;
- if (dev->driver->master_drop)
- dev->driver->master_drop(dev, file_priv, false);
- drm_master_put(&file_priv->minor->master);
- file_priv->is_master = 0;
-
-out_unlock:
- mutex_unlock(&dev->master_mutex);
- return ret;
-}
-
-/*
- * DRM Minors
- * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
- * of them is represented by a drm_minor object. Depending on the capabilities
- * of the device-driver, different interfaces are registered.
- *
- * Minors can be accessed via dev->$minor_name. This pointer is either
- * NULL or a valid drm_minor pointer and stays valid as long as the device is
- * valid. This means, DRM minors have the same life-time as the underlying
- * device. However, this doesn't mean that the minor is active. Minors are
- * registered and unregistered dynamically according to device-state.
- */
-
-static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
- unsigned int type)
-{
- switch (type) {
- case DRM_MINOR_LEGACY:
- return &dev->primary;
- case DRM_MINOR_RENDER:
- return &dev->render;
- case DRM_MINOR_CONTROL:
- return &dev->control;
- default:
- return NULL;
- }
-}
-
-static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *minor;
-
- minor = kzalloc(sizeof(*minor), GFP_KERNEL);
- if (!minor)
- return -ENOMEM;
-
- minor->type = type;
- minor->dev = dev;
-
- *drm_minor_get_slot(dev, type) = minor;
- return 0;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor **slot;
-
- slot = drm_minor_get_slot(dev, type);
- if (*slot) {
- drm_mode_group_destroy(&(*slot)->mode_group);
- kfree(*slot);
- *slot = NULL;
- }
-}
-
-static int drm_minor_register(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *new_minor;
- unsigned long flags;
- int ret;
- int minor_id;
-
- DRM_DEBUG("\n");
-
- new_minor = *drm_minor_get_slot(dev, type);
- if (!new_minor)
- return 0;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor_id = idr_alloc(&drm_minors_idr,
- NULL,
- 64 * type,
- 64 * (type + 1),
- GFP_NOWAIT);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- idr_preload_end();
-
- if (minor_id < 0)
- return minor_id;
-
- new_minor->index = minor_id;
-
- ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_id;
- }
-
- ret = drm_sysfs_device_add(new_minor);
- if (ret) {
- DRM_ERROR("DRM: Error sysfs_device_add.\n");
- goto err_debugfs;
- }
-
- /* replace NULL with @minor so lookups will succeed from now on */
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_replace(&drm_minors_idr, new_minor, new_minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- DRM_DEBUG("new minor assigned %d\n", minor_id);
- return 0;
-
-err_debugfs:
- drm_debugfs_cleanup(new_minor);
-err_id:
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor_id);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- new_minor->index = 0;
- return ret;
-}
-
-static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- minor = *drm_minor_get_slot(dev, type);
- if (!minor || !minor->kdev)
- return;
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
- minor->index = 0;
-
- drm_debugfs_cleanup(minor);
- drm_sysfs_device_remove(minor);
-}
-
-/**
- * drm_minor_acquire - Acquire a DRM minor
- * @minor_id: Minor ID of the DRM-minor
- *
- * Looks up the given minor-ID and returns the respective DRM-minor object. The
- * refence-count of the underlying device is increased so you must release this
- * object with drm_minor_release().
- *
- * As long as you hold this minor, it is guaranteed that the object and the
- * minor->dev pointer will stay valid! However, the device may get unplugged and
- * unregistered while you hold the minor.
- *
- * Returns:
- * Pointer to minor-object with increased device-refcount, or PTR_ERR on
- * failure.
- */
-struct drm_minor *drm_minor_acquire(unsigned int minor_id)
-{
- struct drm_minor *minor;
- unsigned long flags;
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- minor = idr_find(&drm_minors_idr, minor_id);
- if (minor)
- drm_dev_ref(minor->dev);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- if (!minor) {
- return ERR_PTR(-ENODEV);
- } else if (drm_device_is_unplugged(minor->dev)) {
- drm_dev_unref(minor->dev);
- return ERR_PTR(-ENODEV);
- }
-
- return minor;
-}
-
-/**
- * drm_minor_release - Release DRM minor
- * @minor: Pointer to DRM minor object
- *
- * Release a minor that was previously acquired via drm_minor_acquire().
- */
-void drm_minor_release(struct drm_minor *minor)
-{
- drm_dev_unref(minor->dev);
-}
-
-/**
- * drm_put_dev - Unregister and release a DRM device
- * @dev: DRM device
- *
- * Called at module unload time or when a PCI device is unplugged.
- *
- * Use of this function is discouraged. It will eventually go away completely.
- * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
- *
- * Cleans up all DRM device, calling drm_lastclose().
- */
-void drm_put_dev(struct drm_device *dev)
-{
- DRM_DEBUG("\n");
-
- if (!dev) {
- DRM_ERROR("cleanup called no dev\n");
- return;
- }
-
- drm_dev_unregister(dev);
- drm_dev_unref(dev);
-}
-EXPORT_SYMBOL(drm_put_dev);
-
-void drm_unplug_dev(struct drm_device *dev)
-{
- /* for a USB device */
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-
- mutex_lock(&drm_global_mutex);
-
- drm_device_set_unplugged(dev);
-
- if (dev->open_count == 0) {
- drm_put_dev(dev);
- }
- mutex_unlock(&drm_global_mutex);
-}
-EXPORT_SYMBOL(drm_unplug_dev);
-
-/*
- * DRM internal mount
- * We want to be able to allocate our own "struct address_space" to control
- * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
- * stand-alone address_space objects, so we need an underlying inode. As there
- * is no way to allocate an independent inode easily, we need a fake internal
- * VFS mount-point.
- *
- * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
- * frees it again. You are allowed to use iget() and iput() to get references to
- * the inode. But each drm_fs_inode_new() call must be paired with exactly one
- * drm_fs_inode_free() call (which does not have to be the last iput()).
- * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
- * between multiple inode-users. You could, technically, call
- * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
- * iput(), but this way you'd end up with a new vfsmount for each inode.
- */
-
-static int drm_fs_cnt;
-static struct vfsmount *drm_fs_mnt;
-
-static const struct dentry_operations drm_fs_dops = {
- .d_dname = simple_dname,
-};
-
-static const struct super_operations drm_fs_sops = {
- .statfs = simple_statfs,
-};
-
-static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
-{
- return mount_pseudo(fs_type,
- "drm:",
- &drm_fs_sops,
- &drm_fs_dops,
- 0x010203ff);
-}
-
-static struct file_system_type drm_fs_type = {
- .name = "drm",
- .owner = THIS_MODULE,
- .mount = drm_fs_mount,
- .kill_sb = kill_anon_super,
-};
-
-static struct inode *drm_fs_inode_new(void)
-{
- struct inode *inode;
- int r;
-
- r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
- if (r < 0) {
- DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
- return ERR_PTR(r);
- }
-
- inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
- if (IS_ERR(inode))
- simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
-
- return inode;
-}
-
-static void drm_fs_inode_free(struct inode *inode)
-{
- if (inode) {
- iput(inode);
- simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
- }
-}
-
-/**
- * drm_dev_alloc - Allocate new DRM device
- * @driver: DRM driver to allocate device for
- * @parent: Parent device object
- *
- * Allocate and initialize a new DRM device. No device registration is done.
- * Call drm_dev_register() to advertice the device to user space and register it
- * with other core subsystems.
- *
- * The initial ref-count of the object is 1. Use drm_dev_ref() and
- * drm_dev_unref() to take and drop further ref-counts.
- *
- * RETURNS:
- * Pointer to new DRM device, or NULL if out of memory.
- */
-struct drm_device *drm_dev_alloc(struct drm_driver *driver,
- struct device *parent)
-{
- struct drm_device *dev;
- int ret;
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return NULL;
-
- kref_init(&dev->ref);
- dev->dev = parent;
- dev->driver = driver;
-
- INIT_LIST_HEAD(&dev->filelist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
- INIT_LIST_HEAD(&dev->vblank_event_list);
-
- spin_lock_init(&dev->buf_lock);
- spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
- mutex_init(&dev->ctxlist_mutex);
- mutex_init(&dev->master_mutex);
-
- dev->anon_inode = drm_fs_inode_new();
- if (IS_ERR(dev->anon_inode)) {
- ret = PTR_ERR(dev->anon_inode);
- DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
- goto err_free;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
- }
-
- if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
- ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
- if (ret)
- goto err_minors;
- }
-
- ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
- if (ret)
- goto err_minors;
-
- if (drm_ht_create(&dev->map_hash, 12))
- goto err_minors;
-
- ret = drm_ctxbitmap_init(dev);
- if (ret) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto err_ht;
- }
-
- if (driver->driver_features & DRIVER_GEM) {
- ret = drm_gem_init(dev);
- if (ret) {
- DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
- goto err_ctxbitmap;
- }
- }
-
- return dev;
-
-err_ctxbitmap:
- drm_ctxbitmap_cleanup(dev);
-err_ht:
- drm_ht_remove(&dev->map_hash);
-err_minors:
- drm_minor_free(dev, DRM_MINOR_LEGACY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_minor_free(dev, DRM_MINOR_CONTROL);
- drm_fs_inode_free(dev->anon_inode);
-err_free:
- mutex_destroy(&dev->master_mutex);
- kfree(dev);
- return NULL;
-}
-EXPORT_SYMBOL(drm_dev_alloc);
-
-static void drm_dev_release(struct kref *ref)
-{
- struct drm_device *dev = container_of(ref, struct drm_device, ref);
-
- if (dev->driver->driver_features & DRIVER_GEM)
- drm_gem_destroy(dev);
-
- drm_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
- drm_fs_inode_free(dev->anon_inode);
-
- drm_minor_free(dev, DRM_MINOR_LEGACY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_minor_free(dev, DRM_MINOR_CONTROL);
-
- mutex_destroy(&dev->master_mutex);
- kfree(dev->unique);
- kfree(dev);
-}
-
-/**
- * drm_dev_ref - Take reference of a DRM device
- * @dev: device to take reference of or NULL
- *
- * This increases the ref-count of @dev by one. You *must* already own a
- * reference when calling this. Use drm_dev_unref() to drop this reference
- * again.
- *
- * This function never fails. However, this function does not provide *any*
- * guarantee whether the device is alive or running. It only provides a
- * reference to the object and the memory associated with it.
- */
-void drm_dev_ref(struct drm_device *dev)
-{
- if (dev)
- kref_get(&dev->ref);
-}
-EXPORT_SYMBOL(drm_dev_ref);
-
-/**
- * drm_dev_unref - Drop reference of a DRM device
- * @dev: device to drop reference of or NULL
- *
- * This decreases the ref-count of @dev by one. The device is destroyed if the
- * ref-count drops to zero.
- */
-void drm_dev_unref(struct drm_device *dev)
-{
- if (dev)
- kref_put(&dev->ref, drm_dev_release);
-}
-EXPORT_SYMBOL(drm_dev_unref);
-
-/**
- * drm_dev_register - Register DRM device
- * @dev: Device to register
- * @flags: Flags passed to the driver's .load() function
- *
- * Register the DRM device @dev with the system, advertise device to user-space
- * and start normal device operation. @dev must be allocated via drm_dev_alloc()
- * previously.
- *
- * Never call this twice on any device!
- *
- * RETURNS:
- * 0 on success, negative error code on failure.
- */
-int drm_dev_register(struct drm_device *dev, unsigned long flags)
-{
- int ret;
-
- mutex_lock(&drm_global_mutex);
-
- ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
- if (ret)
- goto err_minors;
-
- ret = drm_minor_register(dev, DRM_MINOR_RENDER);
- if (ret)
- goto err_minors;
-
- ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
- if (ret)
- goto err_minors;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, flags);
- if (ret)
- goto err_minors;
- }
-
- /* setup grouping for legacy outputs */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev,
- &dev->primary->mode_group);
- if (ret)
- goto err_unload;
- }
-
- ret = 0;
- goto out_unlock;
-
-err_unload:
- if (dev->driver->unload)
- dev->driver->unload(dev);
-err_minors:
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_dev_register);
-
-/**
- * drm_dev_unregister - Unregister DRM device
- * @dev: Device to unregister
- *
- * Unregister the DRM device from the system. This does the reverse of
- * drm_dev_register() but does not deallocate the device. The caller must call
- * drm_dev_unref() to drop their final reference.
- */
-void drm_dev_unregister(struct drm_device *dev)
-{
- struct drm_map_list *r_list, *list_temp;
-
- drm_lastclose(dev);
-
- if (dev->driver->unload)
- dev->driver->unload(dev);
-
- if (dev->agp)
- drm_pci_agp_destroy(dev);
-
- drm_vblank_cleanup(dev);
-
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
-
- drm_minor_unregister(dev, DRM_MINOR_LEGACY);
- drm_minor_unregister(dev, DRM_MINOR_RENDER);
- drm_minor_unregister(dev, DRM_MINOR_CONTROL);
-}
-EXPORT_SYMBOL(drm_dev_unregister);
-
-/**
- * drm_dev_set_unique - Set the unique name of a DRM device
- * @dev: device of which to set the unique name
- * @fmt: format string for unique name
- *
- * Sets the unique name of a DRM device using the specified format string and
- * a variable list of arguments. Drivers can use this at driver probe time if
- * the unique name of the devices they drive is static.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
-{
- va_list ap;
-
- kfree(dev->unique);
-
- va_start(ap, fmt);
- dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
- va_end(ap);
-
- return dev->unique ? 0 : -ENOMEM;
-}
-EXPORT_SYMBOL(drm_dev_set_unique);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 369b26278e76..ab1a5f6dde8a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -438,7 +438,6 @@ err_out_files:
out:
return ret;
}
-EXPORT_SYMBOL(drm_sysfs_connector_add);
/**
* drm_sysfs_connector_remove - remove an connector device from sysfs
@@ -468,7 +467,6 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
device_unregister(connector->kdev);
connector->kdev = NULL;
}
-EXPORT_SYMBOL(drm_sysfs_connector_remove);
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
@@ -495,71 +493,55 @@ static void drm_sysfs_release(struct device *dev)
}
/**
- * drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @dev: DRM device to be added
- * @head: DRM head in question
+ * drm_sysfs_minor_alloc() - Allocate sysfs device for given minor
+ * @minor: minor to allocate sysfs device for
*
- * Add a DRM device to the DRM's device model class. We use @dev's PCI device
- * as the parent for the Linux device, and make sure it has a file containing
- * the driver we're using (for userspace compatibility).
+ * This allocates a new sysfs device for @minor and returns it. The device is
+ * not registered nor linked. The caller has to use device_add() and
+ * device_del() to register and unregister it.
+ *
+ * Note that dev_get_drvdata() on the new device will return the minor.
+ * However, the device does not hold a ref-count to the minor nor to the
+ * underlying drm_device. This is unproblematic as long as you access the
+ * private data only in sysfs callbacks. device_del() disables those
+ * synchronously, so they cannot be called after you cleanup a minor.
*/
-int drm_sysfs_device_add(struct drm_minor *minor)
+struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
- char *minor_str;
+ const char *minor_str;
+ struct device *kdev;
int r;
if (minor->type == DRM_MINOR_CONTROL)
minor_str = "controlD%d";
- else if (minor->type == DRM_MINOR_RENDER)
- minor_str = "renderD%d";
- else
- minor_str = "card%d";
-
- minor->kdev = kzalloc(sizeof(*minor->kdev), GFP_KERNEL);
- if (!minor->kdev) {
- r = -ENOMEM;
- goto error;
- }
-
- device_initialize(minor->kdev);
- minor->kdev->devt = MKDEV(DRM_MAJOR, minor->index);
- minor->kdev->class = drm_class;
- minor->kdev->type = &drm_sysfs_device_minor;
- minor->kdev->parent = minor->dev->dev;
- minor->kdev->release = drm_sysfs_release;
- dev_set_drvdata(minor->kdev, minor);
-
- r = dev_set_name(minor->kdev, minor_str, minor->index);
+ else if (minor->type == DRM_MINOR_RENDER)
+ minor_str = "renderD%d";
+ else
+ minor_str = "card%d";
+
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return ERR_PTR(-ENOMEM);
+
+ device_initialize(kdev);
+ kdev->devt = MKDEV(DRM_MAJOR, minor->index);
+ kdev->class = drm_class;
+ kdev->type = &drm_sysfs_device_minor;
+ kdev->parent = minor->dev->dev;
+ kdev->release = drm_sysfs_release;
+ dev_set_drvdata(kdev, minor);
+
+ r = dev_set_name(kdev, minor_str, minor->index);
if (r < 0)
- goto error;
-
- r = device_add(minor->kdev);
- if (r < 0)
- goto error;
-
- return 0;
+ goto err_free;
-error:
- DRM_ERROR("device create failed %d\n", r);
- put_device(minor->kdev);
- return r;
-}
+ return kdev;
-/**
- * drm_sysfs_device_remove - remove DRM device
- * @dev: DRM device to remove
- *
- * This call unregisters and cleans up a class device that was created with a
- * call to drm_sysfs_device_add()
- */
-void drm_sysfs_device_remove(struct drm_minor *minor)
-{
- if (minor->kdev)
- device_unregister(minor->kdev);
- minor->kdev = NULL;
+err_free:
+ put_device(kdev);
+ return ERR_PTR(r);
}
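The caller pairs the allocation with device_add()/device_del(); this is roughly what drm_minor_register() in drm_drv.c is expected to do with the result (sketch):

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	ret = device_add(minor->kdev);	/* registration stays with the caller */
	if (ret)
		put_device(minor->kdev);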
-
/**
* drm_class_device_register - Register a struct device in the drm class.
*
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 178d2a9672a8..7f9f6f9e9b7e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -28,6 +28,7 @@ config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
depends on DRM_EXYNOS && !FB_S3C
select FB_MODE_HELPERS
+ select MFD_SYSCON
help
Choose this option if you want to use Exynos FIMD for DRM.
@@ -52,6 +53,7 @@ config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
+ select DRM_PANEL
help
This enables support for DP device.
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index a8ffc8c1477b..4f3c7eb2d37d 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
@@ -28,6 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <drm/bridge/ptn3460.h>
#include "exynos_drm_drv.h"
@@ -41,7 +41,7 @@ struct bridge_init {
struct device_node *node;
};
-static int exynos_dp_init_dp(struct exynos_dp_device *dp)
+static void exynos_dp_init_dp(struct exynos_dp_device *dp)
{
exynos_dp_reset(dp);
@@ -58,8 +58,6 @@ static int exynos_dp_init_dp(struct exynos_dp_device *dp)
exynos_dp_init_hpd(dp);
exynos_dp_init_aux(dp);
-
- return 0;
}
static int exynos_dp_detect_hpd(struct exynos_dp_device *dp)
@@ -875,10 +873,24 @@ static irqreturn_t exynos_dp_irq_handler(int irq, void *arg)
static void exynos_dp_hotplug(struct work_struct *work)
{
struct exynos_dp_device *dp;
- int ret;
dp = container_of(work, struct exynos_dp_device, hotplug_work);
+ if (dp->drm_dev)
+ drm_helper_hpd_irq_event(dp->drm_dev);
+}
+
+static void exynos_dp_commit(struct exynos_drm_display *display)
+{
+ struct exynos_dp_device *dp = display->ctx;
+ int ret;
+
+ /* Keep the panel disabled while we configure video */
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel))
+ DRM_ERROR("failed to disable the panel\n");
+ }
+
ret = exynos_dp_detect_hpd(dp);
if (ret) {
/* Cable has been disconnected, we're done */
@@ -909,6 +921,12 @@ static void exynos_dp_hotplug(struct work_struct *work)
ret = exynos_dp_config_video(dp);
if (ret)
dev_err(dp->dev, "unable to config video\n");
+
+ /* Safe to enable the panel now */
+ if (dp->panel) {
+ if (drm_panel_enable(dp->panel))
+ DRM_ERROR("failed to enable the panel\n");
+ }
}
static enum drm_connector_status exynos_dp_detect(
@@ -933,15 +951,18 @@ static int exynos_dp_get_modes(struct drm_connector *connector)
struct exynos_dp_device *dp = ctx_from_connector(connector);
struct drm_display_mode *mode;
+ if (dp->panel)
+ return drm_panel_get_modes(dp->panel);
+
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_ERROR("failed to create a new display mode.\n");
return 0;
}
- drm_display_mode_from_videomode(&dp->panel.vm, mode);
- mode->width_mm = dp->panel.width_mm;
- mode->height_mm = dp->panel.height_mm;
+ drm_display_mode_from_videomode(&dp->priv.vm, mode);
+ mode->width_mm = dp->priv.width_mm;
+ mode->height_mm = dp->priv.height_mm;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
@@ -1018,10 +1039,13 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
- return 0;
+ if (dp->panel)
+ ret = drm_panel_attach(dp->panel, &dp->connector);
+
+ return ret;
}
static void exynos_dp_phy_init(struct exynos_dp_device *dp)
@@ -1050,26 +1074,50 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
}
}
-static void exynos_dp_poweron(struct exynos_dp_device *dp)
+static void exynos_dp_poweron(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_prepare(dp->panel)) {
+ DRM_ERROR("failed to setup the panel\n");
+ return;
+ }
+ }
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
+ exynos_dp_commit(display);
}
-static void exynos_dp_poweroff(struct exynos_dp_device *dp)
+static void exynos_dp_poweroff(struct exynos_drm_display *display)
{
+ struct exynos_dp_device *dp = display->ctx;
+
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
+ if (dp->panel) {
+ if (drm_panel_disable(dp->panel)) {
+ DRM_ERROR("failed to disable the panel\n");
+ return;
+ }
+ }
+
disable_irq(dp->irq);
flush_work(&dp->hotplug_work);
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);
+
+ if (dp->panel) {
+ if (drm_panel_unprepare(dp->panel))
+ DRM_ERROR("failed to turnoff the panel\n");
+ }
}
static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
@@ -1078,12 +1126,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- exynos_dp_poweron(dp);
+ exynos_dp_poweron(display);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- exynos_dp_poweroff(dp);
+ exynos_dp_poweroff(display);
break;
default:
break;
@@ -1094,6 +1142,7 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
static struct exynos_drm_display_ops exynos_dp_display_ops = {
.create_connector = exynos_dp_create_connector,
.dpms = exynos_dp_dpms,
+ .commit = exynos_dp_commit,
};
static struct exynos_drm_display exynos_dp_display = {
@@ -1201,7 +1250,7 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
- ret = of_get_videomode(dp->dev->of_node, &dp->panel.vm,
+ ret = of_get_videomode(dp->dev->of_node, &dp->priv.vm,
OF_USE_NATIVE_MODE);
if (ret) {
DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
@@ -1215,16 +1264,10 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm_dev = data;
struct resource *res;
- struct exynos_dp_device *dp;
+ struct exynos_dp_device *dp = exynos_dp_display.ctx;
unsigned int irq_flags;
-
int ret = 0;
- dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
- GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
-
dp->dev = &pdev->dev;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
@@ -1236,9 +1279,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = exynos_dp_dt_parse_panel(dp);
- if (ret)
- return ret;
+ if (!dp->panel) {
+ ret = exynos_dp_dt_parse_panel(dp);
+ if (ret)
+ return ret;
+ }
dp->clock = devm_clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
@@ -1298,7 +1343,6 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
disable_irq(dp->irq);
dp->drm_dev = drm_dev;
- exynos_dp_display.ctx = dp;
platform_set_drvdata(pdev, &exynos_dp_display);
@@ -1325,6 +1369,9 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *panel_node;
+ struct exynos_dp_device *dp;
int ret;
ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
@@ -1332,6 +1379,21 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ panel_node = of_parse_phandle(dev->of_node, "panel", 0);
+ if (panel_node) {
+ dp->panel = of_drm_find_panel(panel_node);
+ of_node_put(panel_node);
+ if (!dp->panel)
+ return -EPROBE_DEFER;
+ }
+
+ exynos_dp_display.ctx = dp;
+
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,
@@ -1376,6 +1438,7 @@ static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
};
+MODULE_DEVICE_TABLE(of, exynos_dp_match);
struct platform_driver dp_driver = {
.probe = exynos_dp_probe,
@@ -1390,4 +1453,4 @@ struct platform_driver dp_driver = {
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DP Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index 02cc4f9ab903..a1aee6931bd7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -149,6 +149,7 @@ struct exynos_dp_device {
struct drm_device *drm_dev;
struct drm_connector connector;
struct drm_encoder *encoder;
+ struct drm_panel *panel;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -162,7 +163,7 @@ struct exynos_dp_device {
int dpms_mode;
int hpd_gpio;
- struct exynos_drm_panel_info panel;
+ struct exynos_drm_panel_info priv;
};
/* exynos_dp_reg.c */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 9a16dbe121d1..ba9b3d5ed672 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -117,20 +117,7 @@ static struct drm_encoder *exynos_drm_best_encoder(
struct drm_device *dev = connector->dev;
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
- obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj) {
- DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
- exynos_connector->encoder_id);
- return NULL;
- }
-
- encoder = obj_to_encoder(obj);
-
- return encoder;
+ return drm_encoder_find(dev, exynos_connector->encoder_id);
}
static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -185,7 +172,7 @@ static void exynos_drm_connector_destroy(struct drm_connector *connector)
struct exynos_drm_connector *exynos_connector =
to_exynos_connector(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(exynos_connector);
}
@@ -230,7 +217,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
drm_connector_init(dev, connector, &exynos_connector_funcs, type);
drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
- err = drm_sysfs_connector_add(connector);
+ err = drm_connector_register(connector);
if (err)
goto err_connector;
@@ -250,7 +237,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
return connector;
err_sysfs:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
err_connector:
drm_connector_cleanup(connector);
kfree(exynos_connector);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 95c9435d0266..b68e58f78cd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -69,8 +69,10 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
if (mode > DRM_MODE_DPMS_ON) {
/* wait for the completion of page flip. */
- wait_event(exynos_crtc->pending_flip_queue,
- atomic_read(&exynos_crtc->pending_flip) == 0);
+ if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
+ !atomic_read(&exynos_crtc->pending_flip),
+ HZ/20))
+ atomic_set(&exynos_crtc->pending_flip, 0);
drm_vblank_off(crtc->dev, exynos_crtc->pipe);
}
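The hunk above bounds the wait for outstanding page flips at HZ/20 jiffies (50 ms). wait_event_timeout() returns 0 if the condition was still false when the timeout expired, and the remaining jiffies otherwise, so on timeout the pending-flip counter is force-cleared instead of letting a wedged flip hang DPMS forever. A sketch of the pattern with hypothetical names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

static wait_queue_head_t flip_queue;
static atomic_t pending_flip;

static void example_wait_for_flips(void)
{
        /* zero return means the flip never completed; give up on it */
        if (!wait_event_timeout(flip_queue,
                                !atomic_read(&pending_flip), HZ / 20))
                atomic_set(&pending_flip, 0);
}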
@@ -259,6 +261,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
spin_lock_irq(&dev->event_lock);
drm_vblank_put(dev, exynos_crtc->pipe);
list_del(&event->base.link);
+ atomic_set(&exynos_crtc->pending_flip, 0);
spin_unlock_irq(&dev->event_lock);
goto out;
@@ -508,3 +511,11 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
return -EPERM;
}
+
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
+{
+ struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
+
+ if (manager->ops->te_handler)
+ manager->ops->te_handler(manager);
+}
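exynos_drm_crtc_te_handler(), added above, is the bridge between a panel's tearing effect interrupt and the CRTC backend. A sketch of the two ends, with illustrative names (the real producer and consumer appear later in this diff, in exynos_drm_dsi.c and exynos_drm_fimd.c):

/* consumer: a manager (CRTC backend) opts in by filling .te_handler */
static void example_te_handler(struct exynos_drm_manager *mgr)
{
        /* kick the next frame transfer for a command-mode panel */
}

static struct exynos_drm_manager_ops example_manager_ops = {
        .te_handler = example_te_handler,
};

/* producer: an encoder-side TE interrupt forwards into the CRTC */
static irqreturn_t example_te_irq(int irq, void *dev_id)
{
        struct drm_encoder *encoder = dev_id;

        exynos_drm_crtc_te_handler(encoder->crtc);
        return IRQ_HANDLED;
}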
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 9f74b10a8a01..690dcddab725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -36,4 +36,11 @@ void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);
int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
unsigned int out_type);
+/*
+ * This function calls the manager's te_handler() callback to trigger
+ * the transfer of the video image at the tearing effect (TE)
+ * synchronization signal.
+ */
+void exynos_drm_crtc_te_handler(struct drm_crtc *crtc);
+
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 9e530f205ad2..fa08f05e3e34 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -48,7 +48,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
static void exynos_dpi_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -117,7 +117,7 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &exynos_dpi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -125,14 +125,18 @@ static int exynos_dpi_create_connector(struct exynos_drm_display *display,
static void exynos_dpi_poweron(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
+ drm_panel_prepare(ctx->panel);
drm_panel_enable(ctx->panel);
+ }
}
static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
{
- if (ctx->panel)
+ if (ctx->panel) {
drm_panel_disable(ctx->panel);
+ drm_panel_unprepare(ctx->panel);
+ }
}
static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
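The dpi hunks above adopt the full four-step drm_panel sequence: prepare/unprepare bracket panel power and the init sequence, enable/disable bracket visible output such as the backlight, and the calls are expected to pair up symmetrically in reverse order. A sketch with hypothetical wrappers:

#include <drm/drm_panel.h>

static void example_display_on(struct drm_panel *panel)
{
        drm_panel_prepare(panel);       /* power up, send init sequence */
        /* ... start scanout ... */
        drm_panel_enable(panel);        /* backlight on */
}

static void example_display_off(struct drm_panel *panel)
{
        drm_panel_disable(panel);       /* backlight off */
        /* ... stop scanout ... */
        drm_panel_unprepare(panel);     /* power down */
}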
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ab7d182063c3..0d74e9b99c4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -39,8 +39,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define VBLANK_OFF_DELAY 50000
-
static struct platform_device *exynos_drm_pdev;
static DEFINE_MUTEX(drm_component_lock);
@@ -103,8 +101,6 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
/* setup possible_clones. */
exynos_drm_encoder_setup(dev);
- drm_vblank_offdelay = VBLANK_OFF_DELAY;
-
platform_set_drvdata(dev->platformdev, dev);
/* Try to bind all sub drivers. */
@@ -362,7 +358,7 @@ static int exynos_drm_sys_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
pm_message_t message;
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
message.event = PM_EVENT_SUSPEND;
@@ -373,7 +369,7 @@ static int exynos_drm_sys_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- if (pm_runtime_suspended(dev))
+ if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
return exynos_drm_resume(drm_dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 06cde4506278..69a6fa397d75 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -40,8 +40,6 @@ struct drm_device;
struct exynos_drm_overlay;
struct drm_connector;
-extern unsigned int drm_vblank_offdelay;
-
/* This enumerates device type. */
enum exynos_drm_device_type {
EXYNOS_DEVICE_TYPE_NONE,
@@ -188,6 +186,8 @@ struct exynos_drm_display {
* @win_commit: apply hardware specific overlay data to registers.
* @win_enable: enable hardware specific overlay.
* @win_disable: disable hardware specific overlay.
+ * @te_handler: trigger the transfer of the video image at the tearing
+ * effect (TE) synchronization signal if there is a page flip request.
*/
struct exynos_drm_manager;
struct exynos_drm_manager_ops {
@@ -206,6 +206,7 @@ struct exynos_drm_manager_ops {
void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
+ void (*te_handler)(struct exynos_drm_manager *mgr);
};
/*
@@ -236,14 +237,9 @@ struct exynos_drm_g2d_private {
struct list_head userptr_list;
};
-struct exynos_drm_ipp_private {
- struct device *dev;
- struct list_head event_list;
-};
-
struct drm_exynos_file_private {
struct exynos_drm_g2d_private *g2d_priv;
- struct exynos_drm_ipp_private *ipp_priv;
+ struct device *ipp_dev;
struct file *anon_filp;
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 6302aa64f6c1..442aa2d00132 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -16,7 +16,10 @@
#include <drm/drm_panel.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/irq.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/component.h>
@@ -24,6 +27,7 @@
#include <video/mipi_display.h>
#include <video/videomode.h>
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
/* returns true iff both arguments logically differ */
@@ -54,9 +58,12 @@
/* FIFO memory AC characteristic register */
#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
-#define DSIM_PLLTMR_REG 0x50 /* PLL timer register */
#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
+#define DSIM_PHYCTRL_REG 0x5c
+#define DSIM_PHYTIMING_REG 0x64
+#define DSIM_PHYTIMING1_REG 0x68
+#define DSIM_PHYTIMING2_REG 0x6c
/* DSIM_STATUS */
#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
@@ -200,6 +207,24 @@
#define DSIM_PLL_M(x) ((x) << 4)
#define DSIM_PLL_S(x) ((x) << 1)
+/* DSIM_PHYCTRL */
+#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
+
+/* DSIM_PHYTIMING */
+#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
+#define DSIM_PHYTIMING_HS_EXIT(x) ((x) << 0)
+
+/* DSIM_PHYTIMING1 */
+#define DSIM_PHYTIMING1_CLK_PREPARE(x) ((x) << 24)
+#define DSIM_PHYTIMING1_CLK_ZERO(x) ((x) << 16)
+#define DSIM_PHYTIMING1_CLK_POST(x) ((x) << 8)
+#define DSIM_PHYTIMING1_CLK_TRAIL(x) ((x) << 0)
+
+/* DSIM_PHYTIMING2 */
+#define DSIM_PHYTIMING2_HS_PREPARE(x) ((x) << 16)
+#define DSIM_PHYTIMING2_HS_ZERO(x) ((x) << 8)
+#define DSIM_PHYTIMING2_HS_TRAIL(x) ((x) << 0)
+
#define DSI_MAX_BUS_WIDTH 4
#define DSI_NUM_VIRTUAL_CHANNELS 4
#define DSI_TX_FIFO_SIZE 2048
@@ -233,6 +258,12 @@ struct exynos_dsi_transfer {
#define DSIM_STATE_INITIALIZED BIT(1)
#define DSIM_STATE_CMD_LPM BIT(2)
+struct exynos_dsi_driver_data {
+ unsigned int plltmr_reg;
+
+ unsigned int has_freqband:1;
+};
+
struct exynos_dsi {
struct mipi_dsi_host dsi_host;
struct drm_connector connector;
@@ -247,6 +278,7 @@ struct exynos_dsi {
struct clk *bus_clk;
struct regulator_bulk_data supplies[2];
int irq;
+ int te_gpio;
u32 pll_clk_rate;
u32 burst_clk_rate;
@@ -262,11 +294,39 @@ struct exynos_dsi {
spinlock_t transfer_lock; /* protects transfer_list */
struct list_head transfer_list;
+
+ struct exynos_dsi_driver_data *driver_data;
};
#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
+static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
+ .plltmr_reg = 0x50,
+ .has_freqband = 1,
+};
+
+static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
+ .plltmr_reg = 0x58,
+};
+
+static struct of_device_id exynos_dsi_of_match[] = {
+ { .compatible = "samsung,exynos4210-mipi-dsi",
+ .data = &exynos4_dsi_driver_data },
+ { .compatible = "samsung,exynos5410-mipi-dsi",
+ .data = &exynos5_dsi_driver_data },
+ { }
+};
+
+static inline struct exynos_dsi_driver_data *exynos_dsi_get_driver_data(
+ struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(exynos_dsi_of_match, &pdev->dev);
+
+ return (struct exynos_dsi_driver_data *)of_id->data;
+}
+
static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
{
if (wait_for_completion_timeout(&dsi->completed, msecs_to_jiffies(300)))
@@ -340,14 +400,9 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
unsigned long freq)
{
- static const unsigned long freq_bands[] = {
- 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
- 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
- 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
- 770 * MHZ, 870 * MHZ, 950 * MHZ,
- };
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
unsigned long fin, fout;
- int timeout, band;
+ int timeout;
u8 p, s;
u16 m;
u32 reg;
@@ -368,18 +423,30 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
"failed to find PLL PMS for requested frequency\n");
return -EFAULT;
}
+ dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
- for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
- if (fout < freq_bands[band])
- break;
+ writel(500, dsi->reg_base + driver_data->plltmr_reg);
+
+ reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
- dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d), band %d\n", fout,
- p, m, s, band);
+ if (driver_data->has_freqband) {
+ static const unsigned long freq_bands[] = {
+ 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ,
+ 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ,
+ 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ,
+ 770 * MHZ, 870 * MHZ, 950 * MHZ,
+ };
+ int band;
- writel(500, dsi->reg_base + DSIM_PLLTMR_REG);
+ for (band = 0; band < ARRAY_SIZE(freq_bands); ++band)
+ if (fout < freq_bands[band])
+ break;
+
+ dev_dbg(dsi->dev, "band %d\n", band);
+
+ reg |= DSIM_FREQ_BAND(band);
+ }
- reg = DSIM_FREQ_BAND(band) | DSIM_PLL_EN
- | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG);
timeout = 1000;
@@ -433,6 +500,59 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
return 0;
}
+static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
+{
+ struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
+ u32 reg;
+
+ if (driver_data->has_freqband)
+ return;
+
+ /* B D-PHY: D-PHY Master & Slave Analog Block control */
+ reg = DSIM_PHYCTRL_ULPS_EXIT(0x0af);
+ writel(reg, dsi->reg_base + DSIM_PHYCTRL_REG);
+
+ /*
+ * T LPX: Transmitted length of any Low-Power state period
+ * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
+ * burst
+ */
+ reg = DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING_REG);
+
+ /*
+ * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T CLK-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Clock.
+ * T CLK_POST: Time that the transmitter continues to send HS clock
+ * after the last associated Data Lane has transitioned to LP Mode
+ * Interval is defined as the period from the end of T HS-TRAIL to
+ * the beginning of T CLK-TRAIL
+ * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
+ * the last payload clock bit of a HS transmission burst
+ */
+ reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) |
+ DSIM_PHYTIMING1_CLK_ZERO(0x27) |
+ DSIM_PHYTIMING1_CLK_POST(0x0d) |
+ DSIM_PHYTIMING1_CLK_TRAIL(0x08);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING1_REG);
+
+ /*
+ * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
+ * Line state immediately before the HS-0 Line state starting the
+ * HS transmission
+ * T HS-ZERO: Time that the transmitter drives the HS-0 state prior to
+ * transmitting the Sync sequence.
+ * T HS-TRAIL: Time that the transmitter drives the flipped differential
+ * state after last payload data bit of a HS transmission burst
+ */
+ reg = DSIM_PHYTIMING2_HS_PREPARE(0x09) | DSIM_PHYTIMING2_HS_ZERO(0x0d) |
+ DSIM_PHYTIMING2_HS_TRAIL(0x0b);
+ writel(reg, dsi->reg_base + DSIM_PHYTIMING2_REG);
+}
+
static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
{
u32 reg;
@@ -468,13 +588,20 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
/* DSI configuration */
reg = 0;
+ /*
+ * The first bit of mode_flags specifies the display configuration.
+ * If this bit is set (MIPI_DSI_MODE_VIDEO), the DSI host operates in
+ * video mode, otherwise it operates in command mode.
+ */
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
reg |= DSIM_VIDEO_MODE;
+ /*
+ * The user manual states that the following bits are ignored in
+ * command mode.
+ */
if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH))
reg |= DSIM_MFLUSH_VS;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
- reg |= DSIM_EOT_DISABLE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
reg |= DSIM_SYNC_INFORM;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
@@ -491,6 +618,9 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
reg |= DSIM_HSA_MODE;
}
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
+ reg |= DSIM_EOT_DISABLE;
+
switch (dsi->format) {
case MIPI_DSI_FMT_RGB888:
reg |= DSIM_MAIN_PIX_FORMAT_RGB888;
@@ -944,17 +1074,90 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
+{
+ struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
+ struct drm_encoder *encoder = dsi->encoder;
+
+ if (dsi->state & DSIM_STATE_ENABLED)
+ exynos_drm_crtc_te_handler(encoder->crtc);
+
+ return IRQ_HANDLED;
+}
+
+static void exynos_dsi_enable_irq(struct exynos_dsi *dsi)
+{
+ enable_irq(dsi->irq);
+
+ if (gpio_is_valid(dsi->te_gpio))
+ enable_irq(gpio_to_irq(dsi->te_gpio));
+}
+
+static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
+{
+ if (gpio_is_valid(dsi->te_gpio))
+ disable_irq(gpio_to_irq(dsi->te_gpio));
+
+ disable_irq(dsi->irq);
+}
+
static int exynos_dsi_init(struct exynos_dsi *dsi)
{
- exynos_dsi_enable_clock(dsi);
exynos_dsi_reset(dsi);
- enable_irq(dsi->irq);
+ exynos_dsi_enable_irq(dsi);
+ exynos_dsi_enable_clock(dsi);
exynos_dsi_wait_for_reset(dsi);
+ exynos_dsi_set_phy_ctrl(dsi);
exynos_dsi_init_link(dsi);
return 0;
}
+static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
+{
+ int ret;
+
+ dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
+ if (!gpio_is_valid(dsi->te_gpio)) {
+ dev_err(dsi->dev, "no te-gpios specified\n");
+ ret = dsi->te_gpio;
+ goto out;
+ }
+
+ ret = gpio_request_one(dsi->te_gpio, GPIOF_IN, "te_gpio");
+ if (ret) {
+ dev_err(dsi->dev, "gpio request failed with %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because the panel
+ * driver calls drm_panel_init() first and mipi_dsi_attach() afterwards
+ * in its probe(). This means te_gpio is still invalid when
+ * exynos_dsi_enable_irq() is called before the panel is attached.
+ */
+ ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
+ exynos_dsi_te_irq_handler, NULL,
+ IRQF_TRIGGER_RISING, "TE", dsi);
+ if (ret) {
+ dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
+ gpio_free(dsi->te_gpio);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static void exynos_dsi_unregister_te_irq(struct exynos_dsi *dsi)
+{
+ if (gpio_is_valid(dsi->te_gpio)) {
+ free_irq(gpio_to_irq(dsi->te_gpio), dsi);
+ gpio_free(dsi->te_gpio);
+ dsi->te_gpio = -ENOENT;
+ }
+}
+
static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
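The TE plumbing above uses the legacy integer GPIO API (of_get_named_gpio(), gpio_request_one(), gpio_to_irq()). For comparison only, a sketch of the same registration with the descriptor-based API; this is an assumed alternative, not what the patch does, and it adds IRQF_ONESHOT, which the threaded-IRQ core requires when no primary handler is supplied:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>

/* hypothetical descriptor-based variant of the TE IRQ registration */
static int example_register_te_irq(struct device *dev, void *ctx,
                                   irq_handler_t thread_fn)
{
        struct gpio_desc *te;
        int irq;

        te = devm_gpiod_get(dev, "te", GPIOD_IN);  /* "te-gpios" in DT */
        if (IS_ERR(te))
                return PTR_ERR(te);

        irq = gpiod_to_irq(te);
        if (irq < 0)
                return irq;

        return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
                                         IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                         "TE", ctx);
}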
@@ -968,6 +1171,19 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
if (dsi->connector.dev)
drm_helper_hpd_irq_event(dsi->connector.dev);
+ /*
+ * This is a temporary solution and should be replaced by a more
+ * generic mechanism.
+ *
+ * If the attached panel is a command mode panel, the DSI host must
+ * register a TE interrupt handler.
+ */
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ int ret = exynos_dsi_register_te_irq(dsi);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -976,6 +1192,8 @@ static int exynos_dsi_host_detach(struct mipi_dsi_host *host,
{
struct exynos_dsi *dsi = host_to_dsi(host);
+ exynos_dsi_unregister_te_irq(dsi);
+
dsi->panel_node = NULL;
if (dsi->connector.dev)
@@ -1089,7 +1307,7 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
exynos_dsi_disable_clock(dsi);
- disable_irq(dsi->irq);
+ exynos_dsi_disable_irq(dsi);
}
dsi->state &= ~DSIM_STATE_CMD_LPM;
@@ -1115,7 +1333,7 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
if (ret < 0)
return ret;
- ret = drm_panel_enable(dsi->panel);
+ ret = drm_panel_prepare(dsi->panel);
if (ret < 0) {
exynos_dsi_poweroff(dsi);
return ret;
@@ -1124,6 +1342,14 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
exynos_dsi_set_display_mode(dsi);
exynos_dsi_set_display_enable(dsi, true);
+ ret = drm_panel_enable(dsi->panel);
+ if (ret < 0) {
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
+ exynos_dsi_poweroff(dsi);
+ return ret;
+ }
+
dsi->state |= DSIM_STATE_ENABLED;
return 0;
@@ -1134,8 +1360,9 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
if (!(dsi->state & DSIM_STATE_ENABLED))
return;
- exynos_dsi_set_display_enable(dsi, false);
drm_panel_disable(dsi->panel);
+ exynos_dsi_set_display_enable(dsi, false);
+ drm_panel_unprepare(dsi->panel);
exynos_dsi_poweroff(dsi);
dsi->state &= ~DSIM_STATE_ENABLED;
@@ -1246,7 +1473,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1278,6 +1505,7 @@ static struct exynos_drm_display exynos_dsi_display = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.ops = &exynos_dsi_display_ops,
};
+MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
/* of_* functions will be removed after merge of of_graph patches */
static struct device_node *
@@ -1435,6 +1663,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
goto err_del_component;
}
+ /* Mark the TE GPIO as invalid until it is requested */
+ dsi->te_gpio = -ENOENT;
+
init_completion(&dsi->completed);
spin_lock_init(&dsi->transfer_lock);
INIT_LIST_HEAD(&dsi->transfer_list);
@@ -1443,6 +1674,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dsi_host.dev = &pdev->dev;
dsi->dev = &pdev->dev;
+ dsi->driver_data = exynos_dsi_get_driver_data(pdev);
ret = exynos_dsi_parse_dt(dsi);
if (ret)
@@ -1525,11 +1757,6 @@ static int exynos_dsi_remove(struct platform_device *pdev)
return 0;
}
-static struct of_device_id exynos_dsi_of_match[] = {
- { .compatible = "samsung,exynos4210-mipi-dsi" },
- { }
-};
-
struct platform_driver dsi_driver = {
.probe = exynos_dsi_probe,
.remove = exynos_dsi_remove,
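The dsi diff above introduces the per-variant driver data pattern: each compatible string in exynos_dsi_of_match carries a pointer to a static exynos_dsi_driver_data describing the IP variant (PLL timer register offset, frequency-band support). A sketch of the lookup; of_device_get_match_data() is assumed here as a later convenience helper equivalent to the of_match_device() call in the diff:

#include <linux/of_device.h>

static int example_dsi_probe(struct platform_device *pdev)
{
        const struct exynos_dsi_driver_data *dd;

        /* assumed equivalent of the of_match_device() lookup above */
        dd = of_device_get_match_data(&pdev->dev);
        if (!dd)
                return -ENODEV;

        /* dd->plltmr_reg and dd->has_freqband then gate variant code */
        return 0;
}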
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index d771b467cf0c..32e63f60e1d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -225,7 +225,7 @@ out:
return ret;
}
-static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
.fb_probe = exynos_drm_fbdev_create,
};
@@ -266,7 +266,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
return -ENOMEM;
private->fb_helper = helper = &fbdev->drm_fb_helper;
- helper->funcs = &exynos_drm_fb_helper_funcs;
+
+ drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
num_crtc = dev->mode_config.num_crtc;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 831dde9034c6..ec7cc9ea50df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1887,6 +1887,7 @@ static const struct of_device_id fimc_of_match[] = {
{ .compatible = "samsung,exynos4212-fimc" },
{ },
};
+MODULE_DEVICE_TABLE(of, fimc_of_match);
struct platform_driver fimc_driver = {
.probe = fimc_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33161ad38201..5d09e33fef87 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -20,6 +20,8 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -61,6 +63,24 @@
/* color key value register for hardware window 1 ~ 4. */
#define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8))
+/* I80 / RGB trigger control register */
+#define TRIGCON 0x1A4
+#define TRGMODE_I80_RGB_ENABLE_I80 (1 << 0)
+#define SWTRGCMD_I80_RGB_ENABLE (1 << 1)
+
+/* display mode change control register except exynos4 */
+#define VIDOUT_CON 0x000
+#define VIDOUT_CON_F_I80_LDI0 (0x2 << 8)
+
+/* I80 interface control for main LDI register */
+#define I80IFCONFAx(x) (0x1B0 + (x) * 4)
+#define I80IFCONFBx(x) (0x1B8 + (x) * 4)
+#define LCD_CS_SETUP(x) ((x) << 16)
+#define LCD_WR_SETUP(x) ((x) << 12)
+#define LCD_WR_ACTIVE(x) ((x) << 8)
+#define LCD_WR_HOLD(x) ((x) << 4)
+#define I80IFEN_ENABLE (1 << 0)
+
/* FIMD has totally five hardware windows. */
#define WINDOWS_NR 5
@@ -68,10 +88,14 @@
struct fimd_driver_data {
unsigned int timing_base;
+ unsigned int lcdblk_offset;
+ unsigned int lcdblk_vt_shift;
+ unsigned int lcdblk_bypass_shift;
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
unsigned int has_limited_fmt:1;
+ unsigned int has_vidoutcon:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -82,12 +106,19 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
static struct fimd_driver_data exynos4_fimd_driver_data = {
.timing_base = 0x0,
+ .lcdblk_offset = 0x210,
+ .lcdblk_vt_shift = 10,
+ .lcdblk_bypass_shift = 1,
.has_shadowcon = 1,
};
static struct fimd_driver_data exynos5_fimd_driver_data = {
.timing_base = 0x20000,
+ .lcdblk_offset = 0x214,
+ .lcdblk_vt_shift = 24,
+ .lcdblk_bypass_shift = 15,
.has_shadowcon = 1,
+ .has_vidoutcon = 1,
};
struct fimd_win_data {
@@ -112,15 +143,22 @@ struct fimd_context {
struct clk *bus_clk;
struct clk *lcd_clk;
void __iomem *regs;
+ struct regmap *sysreg;
struct drm_display_mode mode;
struct fimd_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
+ u32 vidcon0;
u32 vidcon1;
+ u32 vidout_con;
+ u32 i80ifcon;
+ bool i80_if;
bool suspended;
int pipe;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
+ atomic_t win_updated;
+ atomic_t triggering;
struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
@@ -136,6 +174,7 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
+MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
@@ -243,6 +282,14 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
u32 clkdiv;
+ if (ctx->i80_if) {
+ /*
+ * The frame done interrupt should occur before the next
+ * TE signal.
+ */
+ ideal_clk *= 2;
+ }
+
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk);
@@ -271,11 +318,10 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
{
struct fimd_context *ctx = mgr->ctx;
struct drm_display_mode *mode = &ctx->mode;
- struct fimd_driver_data *driver_data;
- u32 val, clkdiv, vidcon1;
- int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+ struct fimd_driver_data *driver_data = ctx->driver_data;
+ void *timing_base = ctx->regs + driver_data->timing_base;
+ u32 val, clkdiv;
- driver_data = ctx->driver_data;
if (ctx->suspended)
return;
@@ -283,33 +329,65 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
if (mode->htotal == 0 || mode->vtotal == 0)
return;
- /* setup polarity values */
- vidcon1 = ctx->vidcon1;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- vidcon1 |= VIDCON1_INV_VSYNC;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- vidcon1 |= VIDCON1_INV_HSYNC;
- writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
-
- /* setup vertical timing values. */
- vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
- vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
- vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
-
- val = VIDTCON0_VBPD(vbpd - 1) |
- VIDTCON0_VFPD(vfpd - 1) |
- VIDTCON0_VSPW(vsync_len - 1);
- writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
-
- /* setup horizontal timing values. */
- hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
- hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
- hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
-
- val = VIDTCON1_HBPD(hbpd - 1) |
- VIDTCON1_HFPD(hfpd - 1) |
- VIDTCON1_HSPW(hsync_len - 1);
- writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+ if (ctx->i80_if) {
+ val = ctx->i80ifcon | I80IFEN_ENABLE;
+ writel(val, timing_base + I80IFCONFAx(0));
+
+ /* disable auto frame rate */
+ writel(0, timing_base + I80IFCONFBx(0));
+
+ /* set video type selection to I80 interface */
+ if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ driver_data->lcdblk_offset,
+ 0x3 << driver_data->lcdblk_vt_shift,
+ 0x1 << driver_data->lcdblk_vt_shift)) {
+ DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
+ return;
+ }
+ } else {
+ int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
+ u32 vidcon1;
+
+ /* setup polarity values */
+ vidcon1 = ctx->vidcon1;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ vidcon1 |= VIDCON1_INV_VSYNC;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ vidcon1 |= VIDCON1_INV_HSYNC;
+ writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
+
+ /* setup vertical timing values. */
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
+ vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
+ val = VIDTCON0_VBPD(vbpd - 1) |
+ VIDTCON0_VFPD(vfpd - 1) |
+ VIDTCON0_VSPW(vsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
+
+ /* setup horizontal timing values. */
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
+ hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
+
+ val = VIDTCON1_HBPD(hbpd - 1) |
+ VIDTCON1_HFPD(hfpd - 1) |
+ VIDTCON1_HSPW(hsync_len - 1);
+ writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
+ }
+
+ if (driver_data->has_vidoutcon)
+ writel(ctx->vidout_con, timing_base + VIDOUT_CON);
+
+ /* set bypass selection */
+ if (ctx->sysreg && regmap_update_bits(ctx->sysreg,
+ driver_data->lcdblk_offset,
+ 0x1 << driver_data->lcdblk_bypass_shift,
+ 0x1 << driver_data->lcdblk_bypass_shift)) {
+ DRM_ERROR("Failed to update sysreg for bypass setting.\n");
+ return;
+ }
/* setup horizontal and vertical display size. */
val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
@@ -322,7 +400,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
* fields of registers with the '_F' prefix are updated
* at vsync (same as DMA start)
*/
- val = VIDCON0_ENVID | VIDCON0_ENVID_F;
+ val = ctx->vidcon0;
+ val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
if (ctx->driver_data->has_clksel)
val |= VIDCON0_CLKSEL_LCD;
@@ -660,6 +739,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
}
win_data->enabled = true;
+
+ if (ctx->i80_if)
+ atomic_set(&ctx->win_updated, 1);
}
static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
@@ -838,6 +920,58 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
}
}
+static void fimd_trigger(struct device *dev)
+{
+ struct exynos_drm_manager *mgr = get_fimd_manager(dev);
+ struct fimd_context *ctx = mgr->ctx;
+ struct fimd_driver_data *driver_data = ctx->driver_data;
+ void *timing_base = ctx->regs + driver_data->timing_base;
+ u32 reg;
+
+ atomic_set(&ctx->triggering, 1);
+
+ reg = readl(ctx->regs + VIDINTCON0);
+ reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE |
+ VIDINTCON0_INT_SYSMAINCON);
+ writel(reg, ctx->regs + VIDINTCON0);
+
+ reg = readl(timing_base + TRIGCON);
+ reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
+ writel(reg, timing_base + TRIGCON);
+}
+
+static void fimd_te_handler(struct exynos_drm_manager *mgr)
+{
+ struct fimd_context *ctx = mgr->ctx;
+
+ /* Check whether the CRTC is already detached from the encoder */
+ if (ctx->pipe < 0 || !ctx->drm_dev)
+ return;
+
+ /*
+ * Skip triggering while a trigger is already in progress, because
+ * multiple trigger requests can cause a panel reset.
+ */
+ if (atomic_read(&ctx->triggering))
+ return;
+
+ /*
+ * If there is a page flip request, trigger the transfer and handle the
+ * page flip event so that the current fb is updated into the panel GRAM.
+ */
+ if (atomic_add_unless(&ctx->win_updated, -1, 0))
+ fimd_trigger(ctx->dev);
+
+ /* Wake up the vsync event queue */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+
+ if (!atomic_read(&ctx->triggering))
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ }
+}
+
static struct exynos_drm_manager_ops fimd_manager_ops = {
.dpms = fimd_dpms,
.mode_fixup = fimd_mode_fixup,
@@ -849,6 +983,7 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
.win_mode_set = fimd_win_mode_set,
.win_commit = fimd_win_commit,
.win_disable = fimd_win_disable,
+ .te_handler = fimd_te_handler,
};
static struct exynos_drm_manager fimd_manager = {
@@ -859,26 +994,40 @@ static struct exynos_drm_manager fimd_manager = {
static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
{
struct fimd_context *ctx = (struct fimd_context *)dev_id;
- u32 val;
+ u32 val, clear_bit;
val = readl(ctx->regs + VIDINTCON1);
- if (val & VIDINTCON1_INT_FRAME)
- /* VSYNC interrupt */
- writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+ clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
+ if (val & clear_bit)
+ writel(clear_bit, ctx->regs + VIDINTCON1);
/* check the crtc is detached already from encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
goto out;
- drm_handle_vblank(ctx->drm_dev, ctx->pipe);
- exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ if (ctx->i80_if) {
+ /* unset I80 frame done interrupt */
+ val = readl(ctx->regs + VIDINTCON0);
+ val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
+ writel(val, ctx->regs + VIDINTCON0);
- /* set wait vsync event to zero and wake up queue. */
- if (atomic_read(&ctx->wait_vsync_event)) {
- atomic_set(&ctx->wait_vsync_event, 0);
- wake_up(&ctx->wait_vsync_queue);
+ /* exit triggering mode */
+ atomic_set(&ctx->triggering, 0);
+
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+ } else {
+ drm_handle_vblank(ctx->drm_dev, ctx->pipe);
+ exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
+
+ /* set wait vsync event to zero and wake up queue. */
+ if (atomic_read(&ctx->wait_vsync_event)) {
+ atomic_set(&ctx->wait_vsync_event, 0);
+ wake_up(&ctx->wait_vsync_queue);
+ }
}
+
out:
return IRQ_HANDLED;
}
@@ -923,6 +1072,7 @@ static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
+ struct device_node *i80_if_timings;
struct resource *res;
int ret = -EINVAL;
@@ -944,12 +1094,51 @@ static int fimd_probe(struct platform_device *pdev)
ctx->dev = dev;
ctx->suspended = true;
+ ctx->driver_data = drm_fimd_get_driver_data(pdev);
if (of_property_read_bool(dev->of_node, "samsung,invert-vden"))
ctx->vidcon1 |= VIDCON1_INV_VDEN;
if (of_property_read_bool(dev->of_node, "samsung,invert-vclk"))
ctx->vidcon1 |= VIDCON1_INV_VCLK;
+ i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
+ if (i80_if_timings) {
+ u32 val;
+
+ ctx->i80_if = true;
+
+ if (ctx->driver_data->has_vidoutcon)
+ ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0;
+ else
+ ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0;
+ /*
+ * The user manual states that this "DSI_EN" bit is required to
+ * enable the I80 24-bit data interface.
+ */
+ ctx->vidcon0 |= VIDCON0_DSI_EN;
+
+ if (of_property_read_u32(i80_if_timings, "cs-setup", &val))
+ val = 0;
+ ctx->i80ifcon = LCD_CS_SETUP(val);
+ if (of_property_read_u32(i80_if_timings, "wr-setup", &val))
+ val = 0;
+ ctx->i80ifcon |= LCD_WR_SETUP(val);
+ if (of_property_read_u32(i80_if_timings, "wr-active", &val))
+ val = 1;
+ ctx->i80ifcon |= LCD_WR_ACTIVE(val);
+ if (of_property_read_u32(i80_if_timings, "wr-hold", &val))
+ val = 0;
+ ctx->i80ifcon |= LCD_WR_HOLD(val);
+ }
+ of_node_put(i80_if_timings);
+
+ ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "samsung,sysreg");
+ if (IS_ERR(ctx->sysreg)) {
+ dev_warn(dev, "failed to get system register.\n");
+ ctx->sysreg = NULL;
+ }
+
ctx->bus_clk = devm_clk_get(dev, "fimd");
if (IS_ERR(ctx->bus_clk)) {
dev_err(dev, "failed to get bus clock\n");
@@ -972,7 +1161,8 @@ static int fimd_probe(struct platform_device *pdev)
goto err_del_component;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "vsync");
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ ctx->i80_if ? "lcd_sys" : "vsync");
if (!res) {
dev_err(dev, "irq request failed.\n");
ret = -ENXIO;
@@ -986,7 +1176,6 @@ static int fimd_probe(struct platform_device *pdev)
goto err_del_component;
}
- ctx->driver_data = drm_fimd_get_driver_data(pdev);
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
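The fimd diff above wires command-mode (i80) panels into the page-flip path with two atomics: win_updated records that a new frame was committed, triggering records that a transfer is in flight. The TE interrupt consumes at most one pending frame per pulse, and the lcd_sys frame-done interrupt closes the cycle. A condensed sketch of the handshake, with illustrative names:

#include <linux/atomic.h>

static atomic_t win_updated;    /* a new frame was committed */
static atomic_t triggering;     /* a transfer is in flight */

static void example_te_pulse(void)
{
        if (atomic_read(&triggering))
                return;         /* re-triggering could reset the panel */

        /* decrement only if non-zero: one trigger per committed frame */
        if (atomic_add_unless(&win_updated, -1, 0)) {
                atomic_set(&triggering, 1);
                /* program TRIGCON/VIDINTCON0 and start the transfer */
        }
}

static void example_frame_done_irq(void)
{
        atomic_set(&triggering, 0);     /* transfer finished */
        /* signal vblank and finish any pending page flip */
}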
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 800158714473..df7a77d3eff8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1042,8 +1042,23 @@ err:
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
+ struct drm_exynos_file_private *file_priv = file->driver_priv;
+ struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct device *dev;
+ struct g2d_data *g2d;
struct drm_exynos_g2d_get_ver *ver = data;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
+ if (!dev)
+ return -ENODEV;
+
+ g2d = dev_get_drvdata(dev);
+ if (!g2d)
+ return -EFAULT;
+
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
@@ -1056,7 +1071,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev = g2d_priv->dev;
+ struct device *dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
@@ -1067,6 +1082,10 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
int size;
int ret;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
if (!dev)
return -ENODEV;
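The g2d hunks above add defensive checks because the per-file g2d_priv and the device drvdata are only populated once the G2D sub-driver has actually bound, and an ioctl can legally arrive before that. The guard each ioctl now performs boils down to this sketch (names are illustrative):

static int example_g2d_ioctl_prologue(struct drm_file *file,
                                      struct g2d_data **out)
{
        struct drm_exynos_file_private *file_priv = file->driver_priv;
        struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;

        if (!g2d_priv || !g2d_priv->dev)
                return -ENODEV;         /* sub-driver never bound */

        *out = dev_get_drvdata(g2d_priv->dev);
        return *out ? 0 : -EFAULT;
}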
@@ -1223,13 +1242,17 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev = g2d_priv->dev;
+ struct device *dev;
struct g2d_data *g2d;
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
+ if (!g2d_priv)
+ return -ENODEV;
+
+ dev = g2d_priv->dev;
if (!dev)
return -ENODEV;
@@ -1544,8 +1567,10 @@ static const struct dev_pm_ops g2d_pm_ops = {
static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
+ { .compatible = "samsung,exynos4212-g2d" },
{},
};
+MODULE_DEVICE_TABLE(of, exynos_g2d_match);
struct platform_driver g2d_driver = {
.probe = g2d_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 163a054922cb..15db80138382 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -301,7 +301,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
unsigned int gem_handle,
struct drm_file *filp)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(dev, filp, gem_handle);
@@ -310,8 +309,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
return;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
drm_gem_object_unreference_unlocked(obj);
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index a1888e128f1d..c411399070d6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -129,9 +129,6 @@ void exynos_platform_device_ipp_unregister(void)
int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
- if (!ippdrv)
- return -EINVAL;
-
mutex_lock(&exynos_drm_ippdrv_lock);
list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -141,9 +138,6 @@ int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
- if (!ippdrv)
- return -EINVAL;
-
mutex_lock(&exynos_drm_ippdrv_lock);
list_del(&ippdrv->drv_list);
mutex_unlock(&exynos_drm_ippdrv_lock);
@@ -151,20 +145,15 @@ int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
return 0;
}
-static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
- u32 *idp)
+static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
int ret;
- /* do the allocation under our mutexlock */
mutex_lock(lock);
ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
mutex_unlock(lock);
- if (ret < 0)
- return ret;
- *idp = ret;
- return 0;
+ return ret;
}
static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
@@ -178,35 +167,25 @@ static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
void *obj;
- DRM_DEBUG_KMS("id[%d]\n", id);
-
mutex_lock(lock);
-
- /* find object using handle */
obj = idr_find(id_idr, id);
- if (!obj) {
- DRM_ERROR("failed to find object.\n");
- mutex_unlock(lock);
- return ERR_PTR(-ENODEV);
- }
-
mutex_unlock(lock);
return obj;
}
-static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
- enum drm_exynos_ipp_cmd cmd)
+static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
+ struct drm_exynos_ipp_property *property)
{
- /*
- * check dedicated flag and WB, OUTPUT operation with
- * power on state.
- */
- if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
- !pm_runtime_suspended(ippdrv->dev)))
- return true;
+ if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
+ !pm_runtime_suspended(ippdrv->dev)))
+ return -EBUSY;
- return false;
+ if (ippdrv->check_property &&
+ ippdrv->check_property(ippdrv->dev, property))
+ return -EINVAL;
+
+ return 0;
}
static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
@@ -214,62 +193,30 @@ static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
{
struct exynos_drm_ippdrv *ippdrv;
u32 ipp_id = property->ipp_id;
-
- DRM_DEBUG_KMS("ipp_id[%d]\n", ipp_id);
+ int ret;
if (ipp_id) {
- /* find ipp driver using idr */
- ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
- ipp_id);
- if (IS_ERR(ippdrv)) {
- DRM_ERROR("not found ipp%d driver.\n", ipp_id);
- return ippdrv;
+ ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
+ if (!ippdrv) {
+ DRM_DEBUG("ipp%d driver not found\n", ipp_id);
+ return ERR_PTR(-ENODEV);
}
- /*
- * WB, OUTPUT opertion not supported multi-operation.
- * so, make dedicated state at set property ioctl.
- * when ipp driver finished operations, clear dedicated flags.
- */
- if (ipp_check_dedicated(ippdrv, property->cmd)) {
- DRM_ERROR("already used choose device.\n");
- return ERR_PTR(-EBUSY);
- }
-
- /*
- * This is necessary to find correct device in ipp drivers.
- * ipp drivers have different abilities,
- * so need to check property.
- */
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property)) {
- DRM_ERROR("not support property.\n");
- return ERR_PTR(-EINVAL);
+ ret = ipp_check_driver(ippdrv, property);
+ if (ret < 0) {
+ DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
+ return ERR_PTR(ret);
}
return ippdrv;
} else {
- /*
- * This case is search all ipp driver for finding.
- * user application don't set ipp_id in this case,
- * so ipp subsystem search correct driver in driver list.
- */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- if (ipp_check_dedicated(ippdrv, property->cmd)) {
- DRM_DEBUG_KMS("used device.\n");
- continue;
- }
-
- if (ippdrv->check_property &&
- ippdrv->check_property(ippdrv->dev, property)) {
- DRM_DEBUG_KMS("not support property.\n");
- continue;
- }
-
- return ippdrv;
+ ret = ipp_check_driver(ippdrv, property);
+ if (ret == 0)
+ return ippdrv;
}
- DRM_ERROR("not support ipp driver operations.\n");
+ DRM_DEBUG("cannot find driver suitable for given property.\n");
}
return ERR_PTR(-ENODEV);
@@ -308,8 +255,7 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_prop_list *prop_list = data;
struct exynos_drm_ippdrv *ippdrv;
@@ -346,10 +292,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id);
- if (IS_ERR(ippdrv)) {
+ if (!ippdrv) {
DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id);
- return PTR_ERR(ippdrv);
+ return -ENODEV;
}
*prop_list = ippdrv->prop_list;
@@ -432,7 +378,7 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
if (!event_work)
return ERR_PTR(-ENOMEM);
- INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
+ INIT_WORK(&event_work->work, ipp_sched_event);
return event_work;
}
@@ -441,8 +387,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_property *property = data;
struct exynos_drm_ippdrv *ippdrv;
@@ -489,19 +434,18 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
if (!c_node)
return -ENOMEM;
- /* create property id */
- ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
- &property->prop_id);
- if (ret) {
+ ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
+ if (ret < 0) {
DRM_ERROR("failed to create id.\n");
goto err_clear;
}
+ property->prop_id = ret;
DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
property->prop_id, property->cmd, (int)ippdrv);
/* stored property information and ippdrv in private data */
- c_node->priv = priv;
+ c_node->dev = dev;
c_node->property = *property;
c_node->state = IPP_STATE_IDLE;
@@ -534,7 +478,6 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
INIT_LIST_HEAD(&c_node->mem_list[i]);
INIT_LIST_HEAD(&c_node->event_list);
- list_splice_init(&priv->event_list, &c_node->event_list);
mutex_lock(&ippdrv->cmd_lock);
list_add_tail(&c_node->list, &ippdrv->cmd_list);
mutex_unlock(&ippdrv->cmd_lock);
@@ -577,42 +520,18 @@ static void ipp_clean_cmd_node(struct ipp_context *ctx,
kfree(c_node);
}
-static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
+static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
- struct drm_exynos_ipp_property *property = &c_node->property;
- struct drm_exynos_ipp_mem_node *m_node;
- struct list_head *head;
- int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
-
- for_each_ipp_ops(i) {
- /* source/destination memory list */
- head = &c_node->mem_list[i];
-
- /* find memory node entry */
- list_for_each_entry(m_node, head, list) {
- DRM_DEBUG_KMS("%s,count[%d]m_node[0x%x]\n",
- i ? "dst" : "src", count[i], (int)m_node);
- count[i]++;
- }
+ switch (c_node->property.cmd) {
+ case IPP_CMD_WB:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
+ case IPP_CMD_OUTPUT:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
+ case IPP_CMD_M2M:
+ default:
+ return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
+ !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
}
-
- DRM_DEBUG_KMS("min[%d]max[%d]\n",
- min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
- max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
-
- /*
- * M2M operations should be need paired memory address.
- * so, need to check minimum count about src, dst.
- * other case not use paired memory, so use maximum count
- */
- if (ipp_is_m2m_cmd(property->cmd))
- ret = min(count[EXYNOS_DRM_OPS_SRC],
- count[EXYNOS_DRM_OPS_DST]);
- else
- ret = max(count[EXYNOS_DRM_OPS_SRC],
- count[EXYNOS_DRM_OPS_DST]);
-
- return ret;
}
static struct drm_exynos_ipp_mem_node
@@ -683,16 +602,14 @@ static struct drm_exynos_ipp_mem_node
struct drm_exynos_ipp_queue_buf *qbuf)
{
struct drm_exynos_ipp_mem_node *m_node;
- struct drm_exynos_ipp_buf_info buf_info;
- void *addr;
+ struct drm_exynos_ipp_buf_info *buf_info;
int i;
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
if (!m_node)
return ERR_PTR(-ENOMEM);
- /* clear base address for error handling */
- memset(&buf_info, 0x0, sizeof(buf_info));
+ buf_info = &m_node->buf_info;
/* operations, buffer id */
m_node->ops_id = qbuf->ops_id;
@@ -707,6 +624,8 @@ static struct drm_exynos_ipp_mem_node
/* get dma address by handle */
if (qbuf->handle[i]) {
+ dma_addr_t *addr;
+
addr = exynos_drm_gem_get_dma_addr(drm_dev,
qbuf->handle[i], file);
if (IS_ERR(addr)) {
@@ -714,15 +633,14 @@ static struct drm_exynos_ipp_mem_node
goto err_clear;
}
- buf_info.handles[i] = qbuf->handle[i];
- buf_info.base[i] = *(dma_addr_t *) addr;
- DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%x]\n",
- i, buf_info.base[i], (int)buf_info.handles[i]);
+ buf_info->handles[i] = qbuf->handle[i];
+ buf_info->base[i] = *addr;
+ DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
+ buf_info->base[i], buf_info->handles[i]);
}
}
m_node->filp = file;
- m_node->buf_info = buf_info;
mutex_lock(&c_node->mem_lock);
list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
mutex_unlock(&c_node->mem_lock);
@@ -930,8 +848,7 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_queue_buf *qbuf = data;
struct drm_exynos_ipp_cmd_node *c_node;
@@ -955,9 +872,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
- if (IS_ERR(c_node)) {
+ if (!c_node) {
DRM_ERROR("failed to get command node.\n");
- return PTR_ERR(c_node);
+ return -ENODEV;
}
/* buffer control */
@@ -1062,9 +979,8 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
- struct device *dev = priv->dev;
+ struct device *dev = file_priv->ipp_dev;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
struct drm_exynos_ipp_cmd_work *cmd_work;
@@ -1091,9 +1007,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
- if (IS_ERR(c_node)) {
+ if (!c_node) {
DRM_ERROR("invalid command node list.\n");
- return PTR_ERR(c_node);
+ return -ENODEV;
}
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
@@ -1198,7 +1114,6 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
/* reset h/w block */
if (ippdrv->reset &&
ippdrv->reset(ippdrv->dev)) {
- DRM_ERROR("failed to reset.\n");
return -EINVAL;
}
@@ -1216,30 +1131,24 @@ static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
/* set format */
if (ops->set_fmt) {
ret = ops->set_fmt(ippdrv->dev, config->fmt);
- if (ret) {
- DRM_ERROR("not support format.\n");
+ if (ret)
return ret;
- }
}
/* set transform for rotation, flip */
if (ops->set_transf) {
ret = ops->set_transf(ippdrv->dev, config->degree,
config->flip, &swap);
- if (ret) {
- DRM_ERROR("not support tranf.\n");
- return -EINVAL;
- }
+ if (ret)
+ return ret;
}
/* set size */
if (ops->set_size) {
ret = ops->set_size(ippdrv->dev, swap, &config->pos,
&config->sz);
- if (ret) {
- DRM_ERROR("not support size.\n");
+ if (ret)
return ret;
- }
}
}
@@ -1283,11 +1192,6 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("failed to get node.\n");
- ret = -EFAULT;
- goto err_unlock;
- }
DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
@@ -1545,11 +1449,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
tbuf_id[i] = m_node->buf_id;
DRM_DEBUG_KMS("%s buf_id[%d]\n",
@@ -1586,11 +1485,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- if (!m_node) {
- DRM_ERROR("empty memory node.\n");
- ret = -ENOMEM;
- goto err_mem_unlock;
- }
tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
@@ -1704,21 +1598,17 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
/* get ipp driver entry */
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- u32 ipp_id;
-
ippdrv->drm_dev = drm_dev;
- ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
- &ipp_id);
- if (ret || ipp_id == 0) {
+ ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
+ if (ret < 0) {
DRM_ERROR("failed to create id.\n");
goto err;
}
+ ippdrv->prop_list.ipp_id = ret;
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
- count++, (int)ippdrv, ipp_id);
-
- ippdrv->prop_list.ipp_id = ipp_id;
+ count++, (int)ippdrv, ret);
/* store parent device for node */
ippdrv->parent_dev = dev;
@@ -1776,17 +1666,10 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- priv->dev = dev;
- file_priv->ipp_priv = priv;
- INIT_LIST_HEAD(&priv->event_list);
+ file_priv->ipp_dev = dev;
- DRM_DEBUG_KMS("done priv[0x%x]\n", (int)priv);
+ DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
return 0;
}
@@ -1795,13 +1678,12 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
struct exynos_drm_ippdrv *ippdrv = NULL;
struct ipp_context *ctx = get_ipp_context(dev);
struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
int count = 0;
- DRM_DEBUG_KMS("for priv[0x%x]\n", (int)priv);
+ DRM_DEBUG_KMS("for priv[0x%x]\n", (int)file_priv->ipp_dev);
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
mutex_lock(&ippdrv->cmd_lock);
@@ -1810,7 +1692,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
count++, (int)ippdrv);
- if (c_node->priv == priv) {
+ if (c_node->dev == file_priv->ipp_dev) {
/*
* The userland process entered an abnormal state (e.g. it
* was killed) and the file was closed.
@@ -1832,7 +1714,6 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
mutex_unlock(&ippdrv->cmd_lock);
}
- kfree(priv);
return;
}
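The ipp cleanup above relies on idr_alloc() returning the allocated id itself (here starting at 1) or a negative errno, so ipp_create_id() no longer needs an output parameter and callers simply test ret < 0. A sketch of the simplified pattern:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

static int example_create_id(struct idr *idr, struct mutex *lock, void *obj)
{
        int id;

        mutex_lock(lock);
        id = idr_alloc(idr, obj, 1, 0, GFP_KERNEL);  /* 0 = no upper bound */
        mutex_unlock(lock);

        return id;      /* >= 1 on success, negative errno on failure */
}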
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 7aaeaae757c2..6f48d62aeb30 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -48,7 +48,7 @@ struct drm_exynos_ipp_cmd_work {
/*
* A structure of command node.
*
- * @priv: IPP private information.
+ * @dev: IPP device.
* @list: list head to command queue information.
* @event_list: list head of event.
* @mem_list: list head to source,destination memory queue information.
@@ -64,7 +64,7 @@ struct drm_exynos_ipp_cmd_work {
* @state: state of command node.
*/
struct drm_exynos_ipp_cmd_node {
- struct exynos_drm_ipp_private *priv;
+ struct device *dev;
struct list_head list;
struct list_head event_list;
struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index f01fbb6dc1f0..55af6b41c1df 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -691,6 +691,7 @@ static const struct of_device_id exynos_rotator_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, exynos_rotator_match);
static int rotator_probe(struct platform_device *pdev)
{
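The MODULE_DEVICE_TABLE(of, ...) added above exports the OF match table as module aliases so modprobe can autoload the rotator driver when a matching device-tree node appears. The usual shape, sketched with an invented compatible string:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_match[] = {
        { .compatible = "vendor,example-ip" },  /* illustrative only */
        { /* sentinel */ },
};
/* Emits MODALIAS=of:... entries used for module autoloading. */
MODULE_DEVICE_TABLE(of, example_match);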
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 2fb8705d6461..9528d81d8004 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -562,7 +562,7 @@ static int vidi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &vidi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index aa259b0a873a..562966db2aa1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -84,6 +84,7 @@ struct hdmi_resources {
struct clk *sclk_hdmiphy;
struct clk *mout_hdmi;
struct regulator_bulk_data *regul_bulk;
+ struct regulator *reg_hdmi_en;
int regul_count;
};
@@ -592,6 +593,13 @@ static struct hdmi_driver_data exynos4212_hdmi_driver_data = {
.is_apb_phy = 0,
};
+static struct hdmi_driver_data exynos4210_hdmi_driver_data = {
+ .type = HDMI_TYPE13,
+ .phy_confs = hdmiphy_v13_configs,
+ .phy_conf_count = ARRAY_SIZE(hdmiphy_v13_configs),
+ .is_apb_phy = 0,
+};
+
static struct hdmi_driver_data exynos5_hdmi_driver_data = {
.type = HDMI_TYPE14,
.phy_confs = hdmiphy_v13_configs,
@@ -1129,7 +1137,7 @@ static int hdmi_create_connector(struct exynos_drm_display *display,
}
drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
return 0;
@@ -1241,14 +1249,13 @@ static void hdmi_reg_acr(struct hdmi_context *hdata, u8 *acr)
static void hdmi_audio_init(struct hdmi_context *hdata)
{
- u32 sample_rate, bits_per_sample, frame_size_code;
+ u32 sample_rate, bits_per_sample;
u32 data_num, bit_ch, sample_frq;
u32 val;
u8 acr[7];
sample_rate = 44100;
bits_per_sample = 16;
- frame_size_code = 0;
switch (bits_per_sample) {
case 20:
@@ -2168,7 +2175,6 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
struct device *dev = hdata->dev;
struct hdmi_resources *res = &hdata->res;
static char *supply[] = {
- "hdmi-en",
"vdd",
"vdd_osc",
"vdd_pll",
@@ -2228,6 +2234,20 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
}
res->regul_count = ARRAY_SIZE(supply);
+ res->reg_hdmi_en = devm_regulator_get(dev, "hdmi-en");
+ if (IS_ERR(res->reg_hdmi_en) && PTR_ERR(res->reg_hdmi_en) != -ENOENT) {
+ DRM_ERROR("failed to get hdmi-en regulator\n");
+ return PTR_ERR(res->reg_hdmi_en);
+ }
+ if (!IS_ERR(res->reg_hdmi_en)) {
+ ret = regulator_enable(res->reg_hdmi_en);
+ if (ret) {
+ DRM_ERROR("failed to enable hdmi-en regulator\n");
+ return ret;
+ }
+ } else {
+ res->reg_hdmi_en = NULL;
+ }
+
return ret;
fail:
DRM_ERROR("HDMI resource init - failed\n");
@@ -2263,6 +2283,9 @@ static struct of_device_id hdmi_match_types[] = {
.compatible = "samsung,exynos5-hdmi",
.data = &exynos5_hdmi_driver_data,
}, {
+ .compatible = "samsung,exynos4210-hdmi",
+ .data = &exynos4210_hdmi_driver_data,
+ }, {
.compatible = "samsung,exynos4212-hdmi",
.data = &exynos4212_hdmi_driver_data,
}, {
@@ -2272,6 +2295,7 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
+MODULE_DEVICE_TABLE(of, hdmi_match_types);
static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
@@ -2494,7 +2518,11 @@ static int hdmi_remove(struct platform_device *pdev)
cancel_delayed_work_sync(&hdata->hotplug_work);
- put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->res.reg_hdmi_en)
+ regulator_disable(hdata->res.reg_hdmi_en);
+
+ if (hdata->hdmiphy_port)
+ put_device(&hdata->hdmiphy_port->dev);
put_device(&hdata->ddc_adpt->dev);
pm_runtime_disable(&pdev->dev);
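The hdmi-en change pulls that supply out of the bulk array and requests it individually as an optional regulator: -ENOENT is tolerated, any other error fails the probe, and the pointer is NULLed when the supply is absent so later enable/disable calls can be skipped. A compact sketch of the same pattern, under invented names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct device *dev,
                                       struct regulator **out)
{
        struct regulator *reg = devm_regulator_get(dev, "example-en");

        if (IS_ERR(reg)) {
                if (PTR_ERR(reg) != -ENOENT)
                        return PTR_ERR(reg);    /* real failure */
                *out = NULL;                    /* supply simply absent */
                return 0;
        }

        *out = reg;
        return regulator_enable(reg);   /* must be undone on remove() */
}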
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 7529946d0a74..e8b4ec84b312 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -76,7 +76,7 @@ struct mixer_resources {
struct clk *vp;
struct clk *sclk_mixer;
struct clk *sclk_hdmi;
- struct clk *sclk_dac;
+ struct clk *mout_mixer;
};
enum mixer_version_id {
@@ -93,6 +93,7 @@ struct mixer_context {
bool interlace;
bool powered;
bool vp_enabled;
+ bool has_sclk;
u32 int_en;
struct mutex mixer_mutex;
@@ -106,6 +107,7 @@ struct mixer_context {
struct mixer_drv_data {
enum mixer_version_id version;
bool is_vp_enabled;
+ bool has_sclk;
};
static const u8 filter_y_horiz_tap8[] = {
@@ -363,6 +365,11 @@ static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
mixer_reg_writemask(res, MXR_CFG, val,
MXR_CFG_VP_ENABLE);
+
+ /* control blending of graphic layer 0 */
+ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(0), val,
+ MXR_GRP_CFG_BLEND_PRE_MUL |
+ MXR_GRP_CFG_PIXEL_BLEND_EN);
}
break;
}
@@ -809,19 +816,23 @@ static int vp_resources_init(struct mixer_context *mixer_ctx)
dev_err(dev, "failed to get clock 'vp'\n");
return -ENODEV;
}
- mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
- if (IS_ERR(mixer_res->sclk_mixer)) {
- dev_err(dev, "failed to get clock 'sclk_mixer'\n");
- return -ENODEV;
- }
- mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
- if (IS_ERR(mixer_res->sclk_dac)) {
- dev_err(dev, "failed to get clock 'sclk_dac'\n");
- return -ENODEV;
- }
- if (mixer_res->sclk_hdmi)
- clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+ if (mixer_ctx->has_sclk) {
+ mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
+ if (IS_ERR(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ return -ENODEV;
+ }
+ mixer_res->mout_mixer = devm_clk_get(dev, "mout_mixer");
+ if (IS_ERR(mixer_res->mout_mixer)) {
+ dev_err(dev, "failed to get clock 'mout_mixer'\n");
+ return -ENODEV;
+ }
+
+ if (mixer_res->sclk_hdmi && mixer_res->mout_mixer)
+ clk_set_parent(mixer_res->mout_mixer,
+ mixer_res->sclk_hdmi);
+ }
res = platform_get_resource(mixer_ctx->pdev, IORESOURCE_MEM, 1);
if (res == NULL) {
@@ -1082,7 +1093,8 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
clk_prepare_enable(res->mixer);
if (ctx->vp_enabled) {
clk_prepare_enable(res->vp);
- clk_prepare_enable(res->sclk_mixer);
+ if (ctx->has_sclk)
+ clk_prepare_enable(res->sclk_mixer);
}
mutex_lock(&ctx->mixer_mutex);
@@ -1121,7 +1133,8 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
clk_disable_unprepare(res->mixer);
if (ctx->vp_enabled) {
clk_disable_unprepare(res->vp);
- clk_disable_unprepare(res->sclk_mixer);
+ if (ctx->has_sclk)
+ clk_disable_unprepare(res->sclk_mixer);
}
pm_runtime_put_sync(ctx->dev);
@@ -1189,9 +1202,15 @@ static struct mixer_drv_data exynos5250_mxr_drv_data = {
.is_vp_enabled = 0,
};
+static struct mixer_drv_data exynos4212_mxr_drv_data = {
+ .version = MXR_VER_0_0_0_16,
+ .is_vp_enabled = 1,
+};
+
static struct mixer_drv_data exynos4210_mxr_drv_data = {
.version = MXR_VER_0_0_0_16,
.is_vp_enabled = 1,
+ .has_sclk = 1,
};
static struct platform_device_id mixer_driver_types[] = {
@@ -1208,6 +1227,12 @@ static struct platform_device_id mixer_driver_types[] = {
static struct of_device_id mixer_match_types[] = {
{
+ .compatible = "samsung,exynos4210-mixer",
+ .data = &exynos4210_mxr_drv_data,
+ }, {
+ .compatible = "samsung,exynos4212-mixer",
+ .data = &exynos4212_mxr_drv_data,
+ }, {
.compatible = "samsung,exynos5-mixer",
.data = &exynos5250_mxr_drv_data,
}, {
@@ -1220,6 +1245,7 @@ static struct of_device_id mixer_match_types[] = {
/* end node */
}
};
+MODULE_DEVICE_TABLE(of, mixer_match_types);
static int mixer_bind(struct device *dev, struct device *manager, void *data)
{
@@ -1251,6 +1277,7 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
ctx->pdev = pdev;
ctx->dev = dev;
ctx->vp_enabled = drv->is_vp_enabled;
+ ctx->has_sclk = drv->has_sclk;
ctx->mxr_ver = drv->version;
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
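The mixer now keys the extra sclk_mixer/mout_mixer handling off a per-variant has_sclk flag carried in the driver data matched from the OF table, and guards every prepare/unprepare with the same flag. Sketched in isolation (all names invented):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct example_drv_data {
        bool has_sclk;  /* variant owns the extra clock mux */
};

static int example_sclk_init(struct device *dev,
                             const struct example_drv_data *drv,
                             struct clk **sclk, struct clk *parent)
{
        if (!drv->has_sclk)
                return 0;       /* nothing to do on this variant */

        *sclk = devm_clk_get(dev, "sclk_example");
        if (IS_ERR(*sclk))
                return PTR_ERR(*sclk);

        /* reparent only when both ends of the mux are present */
        return parent ? clk_set_parent(*sclk, parent) : 0;
}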
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index c18268cd516e..248c33a35ebf 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -192,7 +192,7 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
psb_intel_i2c_destroy(gma_encoder->ddc_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -304,7 +304,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
drm_connector_helper_add(connector,
&cdv_intel_crt_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_ddc:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9ff30c2efadb..a4cc0e60a1be 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1713,7 +1713,7 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
}
}
i2c_del_adapter(&intel_dp->adapter);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1847,7 +1847,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
/* Set up the DDC bus. */
switch (output_reg) {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index b99084b3f706..4268bf210034 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -248,7 +248,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
if (gma_encoder->i2c_bus)
psb_intel_i2c_destroy(gma_encoder->i2c_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -356,7 +356,7 @@ void cdv_hdmi_init(struct drm_device *dev,
hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
hdmi_priv->dev = dev;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_ddc:
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8ecc920fc26d..0b770396548c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -446,7 +446,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector)
if (gma_encoder->i2c_bus)
psb_intel_i2c_destroy(gma_encoder->i2c_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -774,7 +774,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
out:
mutex_unlock(&dev->mode_config.mutex);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_find:
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index e7fcc148f333..d0dd3bea8aa5 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -561,7 +561,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
return psbfb_create(psb_fbdev, sizes);
}
-static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs psb_fb_helper_funcs = {
.gamma_set = psbfb_gamma_set,
.gamma_get = psbfb_gamma_get,
.fb_probe = psbfb_probe,
@@ -600,7 +600,8 @@ int psb_fbdev_init(struct drm_device *dev)
}
dev_priv->fbdev = fbdev;
- fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
+
+ drm_fb_helper_prepare(dev, &fbdev->psb_fb_helper, &psb_fb_helper_funcs);
drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
INTELFB_CONN_LIMIT);
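drm_fb_helper_prepare() replaces the bare `fbdev->psb_fb_helper.funcs = ...` assignment, initializing the helper's vtable and bookkeeping before drm_fb_helper_init() runs. A minimal sketch of the new call order (the counts and funcs table are assumptions):

#include <drm/drm_fb_helper.h>

static int example_fbdev_init(struct drm_device *dev,
                              struct drm_fb_helper *helper,
                              const struct drm_fb_helper_funcs *funcs)
{
        drm_fb_helper_prepare(dev, helper, funcs);      /* new step */
        return drm_fb_helper_init(dev, helper,
                                  2 /* crtc_count */,
                                  2 /* max_conn_count */);
}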
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205a0089..ce015db59dc6 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
WARN_ON(gt->pages);
- pages = drm_gem_get_pages(&gt->gem, 0);
+ pages = drm_gem_get_pages(&gt->gem);
if (IS_ERR(pages))
return PTR_ERR(pages);
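drm_gem_get_pages() no longer takes a gfpmask; the allocation flags now come from the GEM object's shmem mapping, set once at object init. Call sites reduce to the shape below (a sketch, names invented):

#include <drm/drmP.h>

static int example_attach_pages(struct drm_gem_object *obj,
                                struct page ***pages_out)
{
        struct page **pages = drm_gem_get_pages(obj);

        if (IS_ERR(pages))
                return PTR_ERR(pages);

        *pages_out = pages;     /* release with drm_gem_put_pages() */
        return 0;
}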
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 6e91b20ce2e5..abf2248da61e 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -318,7 +318,7 @@ static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
if (!dsi_connector)
return;
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
sender = dsi_connector->pkg_sender;
mdfld_dsi_pkg_sender_destroy(sender);
@@ -597,7 +597,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
dsi_config->encoder = encoder;
encoder->base.type = (pipe == 0) ? INTEL_OUTPUT_MIPI :
INTEL_OUTPUT_MIPI2;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
/*TODO: add code to destroy outputs on error*/
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index cf018ddcc5a6..e6f5c620a0a2 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -665,7 +665,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
dev_info(dev->dev, "HDMI initialised.\n");
return;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 9b099468a5db..0d39da6e8b7a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -404,7 +404,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
out:
mutex_unlock(&dev->mode_config.mutex);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index d7778d0472c1..88aad95bde09 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -563,7 +563,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector)
if (lvds_priv->ddc_bus)
psb_intel_i2c_destroy(lvds_priv->ddc_bus);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -829,7 +829,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
*/
out:
mutex_unlock(&dev->mode_config.mutex);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
failed_find:
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index deeb0829b129..0be96fdb5e28 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1682,7 +1682,7 @@ static void psb_intel_sdvo_destroy(struct drm_connector *connector)
psb_intel_sdvo_connector->tv_format);
psb_intel_sdvo_destroy_enhance_property(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -2071,7 +2071,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
gma_connector_attach_encoder(&connector->base, &encoder->base);
- drm_sysfs_connector_add(&connector->base.base);
+ drm_connector_register(&connector->base.base);
}
static void
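The drm_sysfs_connector_add/remove → drm_connector_register/unregister conversions repeated across gma500 are a pure rename of the sysfs registration hooks; the bring-up order is unchanged. Sketch of the canonical sequence (the funcs tables and connector type are placeholders):

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>

static int example_connector_create(struct drm_device *dev,
                struct drm_connector *connector,
                const struct drm_connector_funcs *funcs,
                const struct drm_connector_helper_funcs *helper_funcs)
{
        int ret;

        ret = drm_connector_init(dev, connector, funcs,
                                 DRM_MODE_CONNECTOR_HDMIA);
        if (ret)
                return ret;

        drm_connector_helper_add(connector, helper_funcs);
        /* formerly drm_sysfs_connector_add() */
        return drm_connector_register(connector);
}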
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index ac357b02bd35..d4762799351d 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -15,8 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-
-
+#include <linux/component.h>
#include <linux/hdmi.h>
#include <linux/module.h>
#include <linux/irq.h>
@@ -730,12 +729,9 @@ tda998x_configure_audio(struct tda998x_priv *priv,
/* DRM encoder functions */
-static void
-tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
+static void tda998x_encoder_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
- struct tda998x_encoder_params *p = params;
-
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
(p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
VIP_CNTRL_0_SWAP_B(p->swap_b) |
@@ -752,11 +748,8 @@ tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
priv->params = *p;
}
-static void
-tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void tda998x_encoder_dpms(struct tda998x_priv *priv, int mode)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
/* we only care about on or off: */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
@@ -806,9 +799,8 @@ tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static int
-tda998x_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int tda998x_encoder_mode_valid(struct tda998x_priv *priv,
+ struct drm_display_mode *mode)
{
if (mode->clock > 150000)
return MODE_CLOCK_HIGH;
@@ -820,11 +812,10 @@ tda998x_encoder_mode_valid(struct drm_encoder *encoder,
}
static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+tda998x_encoder_mode_set(struct tda998x_priv *priv,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint16_t ref_pix, ref_line, n_pix, n_line;
uint16_t hs_pix_s, hs_pix_e;
uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1012,20 +1003,16 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
static enum drm_connector_status
-tda998x_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+tda998x_encoder_detect(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint8_t val = cec_read(priv, REG_CEC_RXSHPDLEV);
return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
connector_status_disconnected;
}
-static int
-read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
+static int read_edid_block(struct tda998x_priv *priv, uint8_t *buf, int blk)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
uint8_t offset, segptr;
int ret, i;
@@ -1079,10 +1066,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
return 0;
}
-static uint8_t *
-do_get_edid(struct drm_encoder *encoder)
+static uint8_t *do_get_edid(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
int j, valid_extensions = 0;
uint8_t *block, *new;
bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -1094,7 +1079,7 @@ do_get_edid(struct drm_encoder *encoder)
reg_clear(priv, REG_TX4, TX4_PD_RAM);
/* base block fetch */
- if (read_edid_block(encoder, block, 0))
+ if (read_edid_block(priv, block, 0))
goto fail;
if (!drm_edid_block_valid(block, 0, print_bad_edid))
@@ -1111,7 +1096,7 @@ do_get_edid(struct drm_encoder *encoder)
for (j = 1; j <= block[0x7e]; j++) {
uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
- if (read_edid_block(encoder, ext_block, j))
+ if (read_edid_block(priv, ext_block, j))
goto fail;
if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
@@ -1144,11 +1129,10 @@ fail:
}
static int
-tda998x_encoder_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+tda998x_encoder_get_modes(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
- struct edid *edid = (struct edid *)do_get_edid(encoder);
+ struct edid *edid = (struct edid *)do_get_edid(priv);
int n = 0;
if (edid) {
@@ -1161,18 +1145,14 @@ tda998x_encoder_get_modes(struct drm_encoder *encoder,
return n;
}
-static int
-tda998x_encoder_create_resources(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static void tda998x_encoder_set_polling(struct tda998x_priv *priv,
+ struct drm_connector *connector)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
if (priv->hdmi->irq)
connector->polled = DRM_CONNECTOR_POLL_HPD;
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
- return 0;
}
static int
@@ -1185,66 +1165,97 @@ tda998x_encoder_set_property(struct drm_encoder *encoder,
return 0;
}
-static void
-tda998x_encoder_destroy(struct drm_encoder *encoder)
+static void tda998x_destroy(struct tda998x_priv *priv)
{
- struct tda998x_priv *priv = to_tda998x_priv(encoder);
-
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
if (priv->hdmi->irq)
free_irq(priv->hdmi->irq, priv);
- if (priv->cec)
- i2c_unregister_device(priv->cec);
+ i2c_unregister_device(priv->cec);
+}
+
+/* Slave encoder support */
+
+static void
+tda998x_encoder_slave_set_config(struct drm_encoder *encoder, void *params)
+{
+ tda998x_encoder_set_config(to_tda998x_priv(encoder), params);
+}
+
+static void tda998x_encoder_slave_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+
+ tda998x_destroy(priv);
drm_i2c_encoder_destroy(encoder);
kfree(priv);
}
-static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
- .set_config = tda998x_encoder_set_config,
- .destroy = tda998x_encoder_destroy,
- .dpms = tda998x_encoder_dpms,
- .save = tda998x_encoder_save,
- .restore = tda998x_encoder_restore,
- .mode_fixup = tda998x_encoder_mode_fixup,
- .mode_valid = tda998x_encoder_mode_valid,
- .mode_set = tda998x_encoder_mode_set,
- .detect = tda998x_encoder_detect,
- .get_modes = tda998x_encoder_get_modes,
- .create_resources = tda998x_encoder_create_resources,
- .set_property = tda998x_encoder_set_property,
-};
+static void tda998x_encoder_slave_dpms(struct drm_encoder *encoder, int mode)
+{
+ tda998x_encoder_dpms(to_tda998x_priv(encoder), mode);
+}
-/* I2C driver functions */
+static int tda998x_encoder_slave_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ return tda998x_encoder_mode_valid(to_tda998x_priv(encoder), mode);
+}
-static int
-tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static void
+tda998x_encoder_slave_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- return 0;
+ tda998x_encoder_mode_set(to_tda998x_priv(encoder), mode, adjusted_mode);
+}
+
+static enum drm_connector_status
+tda998x_encoder_slave_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return tda998x_encoder_detect(to_tda998x_priv(encoder));
+}
+
+static int tda998x_encoder_slave_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return tda998x_encoder_get_modes(to_tda998x_priv(encoder), connector);
}
static int
-tda998x_remove(struct i2c_client *client)
+tda998x_encoder_slave_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
+ tda998x_encoder_set_polling(to_tda998x_priv(encoder), connector);
return 0;
}
-static int
-tda998x_encoder_init(struct i2c_client *client,
- struct drm_device *dev,
- struct drm_encoder_slave *encoder_slave)
+static struct drm_encoder_slave_funcs tda998x_encoder_slave_funcs = {
+ .set_config = tda998x_encoder_slave_set_config,
+ .destroy = tda998x_encoder_slave_destroy,
+ .dpms = tda998x_encoder_slave_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .mode_valid = tda998x_encoder_slave_mode_valid,
+ .mode_set = tda998x_encoder_slave_mode_set,
+ .detect = tda998x_encoder_slave_detect,
+ .get_modes = tda998x_encoder_slave_get_modes,
+ .create_resources = tda998x_encoder_slave_create_resources,
+ .set_property = tda998x_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
{
- struct tda998x_priv *priv;
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
@@ -1252,17 +1263,11 @@ tda998x_encoder_init(struct i2c_client *client,
priv->current_page = 0xff;
priv->hdmi = client;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
- if (!priv->cec) {
- kfree(priv);
+ if (!priv->cec)
return -ENODEV;
- }
- priv->encoder = &encoder_slave->base;
priv->dpms = DRM_MODE_DPMS_OFF;
- encoder_slave->slave_priv = priv;
- encoder_slave->slave_funcs = &tda998x_encoder_funcs;
-
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1365,12 +1370,231 @@ fail:
*/
if (priv->cec)
i2c_unregister_device(priv->cec);
- kfree(priv);
- encoder_slave->slave_priv = NULL;
- encoder_slave->slave_funcs = NULL;
return -ENXIO;
}
+static int tda998x_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder_slave)
+{
+ struct tda998x_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->encoder = &encoder_slave->base;
+
+ ret = tda998x_create(client, priv);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ encoder_slave->slave_priv = priv;
+ encoder_slave->slave_funcs = &tda998x_encoder_slave_funcs;
+
+ return 0;
+}
+
+struct tda998x_priv2 {
+ struct tda998x_priv base;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+};
+
+#define conn_to_tda998x_priv2(x) \
+ container_of(x, struct tda998x_priv2, connector)
+
+#define enc_to_tda998x_priv2(x) \
+ container_of(x, struct tda998x_priv2, encoder)
+
+static void tda998x_encoder2_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_encoder_dpms(&priv->base, mode);
+}
+
+static void tda998x_encoder_prepare(struct drm_encoder *encoder)
+{
+ tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void tda998x_encoder_commit(struct drm_encoder *encoder)
+{
+ tda998x_encoder2_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static void tda998x_encoder2_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_encoder_mode_set(&priv->base, mode, adjusted_mode);
+}
+
+static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
+ .dpms = tda998x_encoder2_dpms,
+ .save = tda998x_encoder_save,
+ .restore = tda998x_encoder_restore,
+ .mode_fixup = tda998x_encoder_mode_fixup,
+ .prepare = tda998x_encoder_prepare,
+ .commit = tda998x_encoder_commit,
+ .mode_set = tda998x_encoder2_mode_set,
+};
+
+static void tda998x_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct tda998x_priv2 *priv = enc_to_tda998x_priv2(encoder);
+
+ tda998x_destroy(&priv->base);
+ drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs tda998x_encoder_funcs = {
+ .destroy = tda998x_encoder_destroy,
+};
+
+static int tda998x_connector_get_modes(struct drm_connector *connector)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_get_modes(&priv->base, connector);
+}
+
+static int tda998x_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_mode_valid(&priv->base, mode);
+}
+
+static struct drm_encoder *
+tda998x_connector_best_encoder(struct drm_connector *connector)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return &priv->encoder;
+}
+
+static
+const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
+ .get_modes = tda998x_connector_get_modes,
+ .mode_valid = tda998x_connector_mode_valid,
+ .best_encoder = tda998x_connector_best_encoder,
+};
+
+static enum drm_connector_status
+tda998x_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct tda998x_priv2 *priv = conn_to_tda998x_priv2(connector);
+
+ return tda998x_encoder_detect(&priv->base);
+}
+
+static void tda998x_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs tda998x_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = tda998x_connector_detect,
+ .destroy = tda998x_connector_destroy,
+};
+
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+ struct tda998x_encoder_params *params = dev->platform_data;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct drm_device *drm = data;
+ struct tda998x_priv2 *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->base.encoder = &priv->encoder;
+ priv->connector.interlace_allowed = 1;
+ priv->encoder.possible_crtcs = 1 << 0;
+
+ ret = tda998x_create(client, &priv->base);
+ if (ret)
+ return ret;
+
+ if (!dev->of_node && params)
+ tda998x_encoder_set_config(&priv->base, params);
+
+ tda998x_encoder_set_polling(&priv->base, &priv->connector);
+
+ drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
+ ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ if (ret)
+ goto err_encoder;
+
+ drm_connector_helper_add(&priv->connector,
+ &tda998x_connector_helper_funcs);
+ ret = drm_connector_init(drm, &priv->connector,
+ &tda998x_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret)
+ goto err_connector;
+
+ ret = drm_connector_register(&priv->connector);
+ if (ret)
+ goto err_sysfs;
+
+ priv->connector.encoder = &priv->encoder;
+ drm_mode_connector_attach_encoder(&priv->connector, &priv->encoder);
+
+ return 0;
+
+err_sysfs:
+ drm_connector_cleanup(&priv->connector);
+err_connector:
+ drm_encoder_cleanup(&priv->encoder);
+err_encoder:
+ tda998x_destroy(&priv->base);
+ return ret;
+}
+
+static void tda998x_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct tda998x_priv2 *priv = dev_get_drvdata(dev);
+
+ drm_connector_cleanup(&priv->connector);
+ drm_encoder_cleanup(&priv->encoder);
+ tda998x_destroy(&priv->base);
+}
+
+static const struct component_ops tda998x_ops = {
+ .bind = tda998x_bind,
+ .unbind = tda998x_unbind,
+};
+
+static int
+tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ return component_add(&client->dev, &tda998x_ops);
+}
+
+static int tda998x_remove(struct i2c_client *client)
+{
+ component_del(&client->dev, &tda998x_ops);
+ return 0;
+}
+
#ifdef CONFIG_OF
static const struct of_device_id tda998x_dt_ids[] = {
{ .compatible = "nxp,tda998x", },
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 437e1824d0bf..4e39ab34eb1c 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -69,15 +69,3 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT
option changes the default for that module option.
If in doubt, say "N".
-
-config DRM_I915_UMS
- bool "Enable userspace modesetting on Intel hardware (DEPRECATED)"
- depends on DRM_I915 && BROKEN
- default n
- help
- Choose this option if you still need userspace modesetting.
-
- Userspace modesetting is deprecated for quite some time now, so
- enable this only if you have ancient versions of the DDX drivers.
-
- If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cad1683d8bb5..91bd167e1cb7 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -59,6 +59,7 @@ i915-y += dvo_ch7017.o \
intel_crt.o \
intel_ddi.o \
intel_dp.o \
+ intel_dp_mst.o \
intel_dsi_cmd.o \
intel_dsi.o \
intel_dsi_pll.o \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9d7954366bd2..dea99d92fb4a 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -426,6 +426,9 @@ static const u32 gen7_render_regs[] = {
GEN7_SO_WRITE_OFFSET(1),
GEN7_SO_WRITE_OFFSET(2),
GEN7_SO_WRITE_OFFSET(3),
+ GEN7_L3SQCREG1,
+ GEN7_L3CNTLREG2,
+ GEN7_L3CNTLREG3,
};
static const u32 gen7_blt_regs[] = {
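The three GEN7_L3* registers extend the render engine's whitelist; the command parser rejects batch commands that touch registers absent from these per-engine tables. The check amounts to a table lookup, sketched here as a linear scan (the in-tree parser's table handling may differ):

#include <linux/types.h>

static bool example_reg_allowed(const u32 *table, int count, u32 offset)
{
        int i;

        for (i = 0; i < count; i++)
                if (table[i] == offset)
                        return true;
        return false;   /* unlisted register: reject the batch */
}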
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b8c689202c40..9e737b771c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -170,11 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
}
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
+ if (obj->frontbuffer_bits)
+ seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
}
static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
{
- seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+ seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, ' ');
}
@@ -515,6 +517,11 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
unsigned long flags;
struct intel_crtc *crtc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
for_each_intel_crtc(dev, crtc) {
const char pipe = pipe_name(crtc->pipe);
@@ -556,6 +563,8 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -985,29 +994,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
i915_next_seqno_get, i915_next_seqno_set,
"0x%llx\n");
-static int i915_rstdby_delays(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u16 crstanddelay;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- crstanddelay = I915_READ16(CRSTANDVID);
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
-
- return 0;
-}
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1029,7 +1015,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+ } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) ||
+ IS_BROADWELL(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1048,7 +1035,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
reqf = I915_READ(GEN6_RPNSWREQ);
reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
reqf >>= 24;
else
reqf >>= 25;
@@ -1065,7 +1052,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
- if (IS_HASWELL(dev))
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
@@ -1121,20 +1108,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max overclocked frequency: %dMHz\n",
dev_priv->rps.max_freq * GT_FREQUENCY_MULTIPLIER);
} else if (IS_VALLEYVIEW(dev)) {
- u32 freq_sts, val;
+ u32 freq_sts;
mutex_lock(&dev_priv->rps.hw_lock);
freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
- val = valleyview_rps_max_freq(dev_priv);
seq_printf(m, "max GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq));
- val = valleyview_rps_min_freq(dev_priv);
seq_printf(m, "min GPU freq: %d MHz\n",
- vlv_gpu_freq(dev_priv, val));
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+
+ seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
seq_printf(m, "current GPU freq: %d MHz\n",
vlv_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
@@ -1148,61 +1136,6 @@ out:
return ret;
}
-static int i915_delayfreq_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 delayfreq;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 0; i < 16; i++) {
- delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
- seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
- (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
- }
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
-static inline int MAP_TO_MV(int map)
-{
- return 1250 - (map * 25);
-}
-
-static int i915_inttoext_table(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 inttoext;
- int ret, i;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- for (i = 1; i <= 32; i++) {
- inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
- seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
- }
-
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = m->private;
@@ -1513,10 +1446,17 @@ static int i915_ips_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
- if (IS_BROADWELL(dev) || I915_READ(IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "enabled\n");
- else
- seq_puts(m, "disabled\n");
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915.enable_ips));
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (I915_READ(IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
intel_runtime_pm_put(dev_priv);
@@ -1620,26 +1560,6 @@ out:
return ret;
}
-static int i915_gfxec(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
-}
-
static int i915_opregion(struct seq_file *m, void *unused)
{
struct drm_info_node *node = m->private;
@@ -1677,9 +1597,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
#ifdef CONFIG_DRM_I915_FBDEV
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
- if (ret)
- return ret;
ifbdev = dev_priv->fbdev;
fb = to_intel_framebuffer(ifbdev->helper.fb);
@@ -1692,7 +1609,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
seq_putc(m, '\n');
- mutex_unlock(&dev->mode_config.mutex);
#endif
mutex_lock(&dev->mode_config.fb_lock);
@@ -1723,7 +1639,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
struct intel_context *ctx;
int ret, i;
- ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1740,7 +1656,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
}
list_for_each_entry(ctx, &dev_priv->context_list, link) {
- if (ctx->obj == NULL)
+ if (ctx->legacy_hw_ctx.rcs_state == NULL)
continue;
seq_puts(m, "HW context ");
@@ -1749,11 +1665,11 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ring->default_context == ctx)
seq_printf(m, "(default context %s) ", ring->name);
- describe_obj(m, ctx->obj);
+ describe_obj(m, ctx->legacy_hw_ctx.rcs_state);
seq_putc(m, '\n');
}
- mutex_unlock(&dev->mode_config.mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -1863,7 +1779,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
if (i915_gem_context_is_default(ctx))
seq_puts(m, " default context:\n");
else
- seq_printf(m, " context %d:\n", ctx->id);
+ seq_printf(m, " context %d:\n", ctx->user_handle);
ppgtt->debug_dump(ppgtt, m);
return 0;
@@ -1976,17 +1892,25 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
intel_runtime_pm_get(dev_priv);
+ mutex_lock(&dev_priv->psr.lock);
seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+ seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+ seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
+ seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
+ dev_priv->psr.busy_frontbuffer_bits);
+ seq_printf(m, "Re-enable work scheduled: %s\n",
+ yesno(work_busy(&dev_priv->psr.work.work)));
enabled = HAS_PSR(dev) &&
I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
- seq_printf(m, "Enabled: %s\n", yesno(enabled));
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
if (HAS_PSR(dev))
psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
EDP_PSR_PERF_CNT_MASK;
seq_printf(m, "Performance_Counter: %u\n", psrperf);
+ mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -2072,7 +1996,7 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy));
seq_printf(m, "IRQs disabled: %s\n",
- yesno(dev_priv->pm.irqs_disabled));
+ yesno(!intel_irqs_enabled(dev_priv)));
return 0;
}
@@ -2126,6 +2050,8 @@ static const char *power_domain_str(enum intel_display_power_domain domain)
return "VGA";
case POWER_DOMAIN_AUDIO:
return "AUDIO";
+ case POWER_DOMAIN_PLLS:
+ return "PLLS";
case POWER_DOMAIN_INIT:
return "INIT";
default:
@@ -2223,9 +2149,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder;
- seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
- crtc->primary->fb->base.id, crtc->x, crtc->y,
- crtc->primary->fb->width, crtc->primary->fb->height);
+ if (crtc->primary->fb)
+ seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
+ crtc->primary->fb->base.id, crtc->x, crtc->y,
+ crtc->primary->fb->width, crtc->primary->fb->height);
+ else
+ seq_puts(m, "\tprimary plane disabled\n");
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
intel_encoder_info(m, intel_crtc, intel_encoder);
}
@@ -2287,13 +2216,15 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "\tCEA rev: %d\n",
connector->display_info.cea_rev);
}
- if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
- intel_encoder->type == INTEL_OUTPUT_EDP)
- intel_dp_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
- intel_hdmi_info(m, intel_connector);
- else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
+ if (intel_encoder) {
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_EDP)
+ intel_dp_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_HDMI)
+ intel_hdmi_info(m, intel_connector);
+ else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ }
seq_printf(m, "\tmodes:\n");
list_for_each_entry(mode, &connector->modes, head)
@@ -2347,17 +2278,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
bool active;
int x, y;
- seq_printf(m, "CRTC %d: pipe: %c, active: %s\n",
+ seq_printf(m, "CRTC %d: pipe: %c, active=%s (size=%dx%d)\n",
crtc->base.base.id, pipe_name(crtc->pipe),
- yesno(crtc->active));
+ yesno(crtc->active), crtc->config.pipe_src_w, crtc->config.pipe_src_h);
if (crtc->active) {
intel_crtc_info(m, crtc);
active = cursor_position(dev, crtc->pipe, &x, &y);
- seq_printf(m, "\tcursor visible? %s, position (%d, %d), addr 0x%08x, active? %s\n",
+ seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
yesno(crtc->cursor_base),
- x, y, crtc->cursor_addr,
- yesno(active));
+ x, y, crtc->cursor_width, crtc->cursor_height,
+ crtc->cursor_addr, yesno(active));
}
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
@@ -2377,12 +2308,132 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_semaphore_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ int i, j, ret;
+
+ if (!i915_semaphore_is_enabled(dev)) {
+ seq_puts(m, "Semaphores are disabled\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+ intel_runtime_pm_get(dev_priv);
+
+ if (IS_BROADWELL(dev)) {
+ struct page *page;
+ uint64_t *seqno;
+
+ page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0);
+
+ seqno = (uint64_t *)kmap_atomic(page);
+ for_each_ring(ring, dev_priv, i) {
+ uint64_t offset;
+
+ seq_printf(m, "%s\n", ring->name);
+
+ seq_puts(m, " Last signal:");
+ for (j = 0; j < num_rings; j++) {
+ offset = i * I915_NUM_RINGS + j;
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ seq_puts(m, " Last wait: ");
+ for (j = 0; j < num_rings; j++) {
+ offset = i + (j * I915_NUM_RINGS);
+ seq_printf(m, "0x%08llx (0x%02llx) ",
+ seqno[offset], offset * 8);
+ }
+ seq_putc(m, '\n');
+
+ }
+ kunmap_atomic(seqno);
+ } else {
+ seq_puts(m, " Last signal:");
+ for_each_ring(ring, dev_priv, i)
+ for (j = 0; j < num_rings; j++)
+ seq_printf(m, "0x%08x\n",
+ I915_READ(ring->semaphore.mbox.signal[j]));
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, "\nSync seqno:\n");
+ for_each_ring(ring, dev_priv, i) {
+ for (j = 0; j < num_rings; j++) {
+ seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
+ }
+ seq_putc(m, '\n');
+ }
+ seq_putc(m, '\n');
+
+ intel_runtime_pm_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ drm_modeset_lock_all(dev);
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
+ seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount,
+ pll->active, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
struct pipe_crc_info {
const char *name;
struct drm_device *dev;
enum pipe pipe;
};
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_encoder *encoder;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ intel_encoder = to_intel_encoder(encoder);
+ if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
+ continue;
+ intel_dig_port = enc_to_dig_port(encoder);
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ }
+ drm_modeset_unlock_all(dev);
+ return 0;
+}
+
static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
{
struct pipe_crc_info *info = inode->i_private;
@@ -2849,7 +2900,60 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.cpu_transcoder == TRANSCODER_EDP &&
+ !crtc->config.pch_pfit.enabled) {
+ crtc->config.pch_pfit.force_thru = true;
+
+ intel_display_power_get(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
+
+ drm_modeset_lock_all(dev);
+ /*
+ * If we use the eDP transcoder we need to make sure that we don't
+ * bypass the pfit, since otherwise the pipe CRC source won't work. Only
+ * relevant on hsw with pipe A when using the always-on power well
+ * routing.
+ */
+ if (crtc->config.pch_pfit.force_thru) {
+ crtc->config.pch_pfit.force_thru = false;
+
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+
+ intel_display_power_put(dev_priv,
+ POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
+ }
+ drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
uint32_t *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -2863,6 +2967,9 @@ static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
+ if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev);
+
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -2895,11 +3002,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
else if (INTEL_INFO(dev)->gen < 5)
ret = i9xx_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_VALLEYVIEW(dev))
- ret = vlv_pipe_crc_ctl_reg(dev,pipe, &source, &val);
+ ret = vlv_pipe_crc_ctl_reg(dev, pipe, &source, &val);
else if (IS_GEN5(dev) || IS_GEN6(dev))
ret = ilk_pipe_crc_ctl_reg(&source, &val);
else
- ret = ivb_pipe_crc_ctl_reg(&source, &val);
+ ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
return ret;
@@ -2929,11 +3036,16 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
/* real source -> none transition */
if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
struct intel_pipe_crc_entry *entries;
+ struct intel_crtc *crtc =
+ to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
pipe_name(pipe));
- intel_wait_for_vblank(dev, pipe);
+ drm_modeset_lock(&crtc->base.mutex, NULL);
+ if (crtc->active)
+ intel_wait_for_vblank(dev, pipe);
+ drm_modeset_unlock(&crtc->base.mutex);
spin_lock_irq(&pipe_crc->lock);
entries = pipe_crc->entries;
@@ -2946,6 +3058,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
g4x_undo_pipe_scramble_reset(dev, pipe);
else if (IS_VALLEYVIEW(dev))
vlv_undo_pipe_scramble_reset(dev, pipe);
+ else if (IS_HASWELL(dev) && pipe == PIPE_A)
+ hsw_undo_trans_edp_pipe_A_crc_wa(dev);
}
return 0;
@@ -3177,7 +3291,7 @@ static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, pri_wm_latency_show, dev);
@@ -3187,7 +3301,7 @@ static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, spr_wm_latency_show, dev);
@@ -3197,7 +3311,7 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return -ENODEV;
return single_open(file, cur_wm_latency_show, dev);
@@ -3506,7 +3620,7 @@ i915_max_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3532,7 +3646,7 @@ i915_max_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3549,8 +3663,8 @@ i915_max_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3587,7 +3701,7 @@ i915_min_freq_get(void *data, u64 *val)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3613,7 +3727,7 @@ i915_min_freq_set(void *data, u64 val)
u32 rp_state_cap, hw_max, hw_min;
int ret;
- if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ if (INTEL_INFO(dev)->gen < 6)
return -ENODEV;
flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -3630,8 +3744,8 @@ i915_min_freq_set(void *data, u64 val)
if (IS_VALLEYVIEW(dev)) {
val = vlv_freq_opcode(dev_priv, val);
- hw_max = valleyview_rps_max_freq(dev_priv);
- hw_min = valleyview_rps_min_freq(dev_priv);
+ hw_max = dev_priv->rps.max_freq;
+ hw_min = dev_priv->rps.min_freq;
} else {
do_div(val, GT_FREQUENCY_MULTIPLIER);
@@ -3799,14 +3913,10 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
{"i915_gem_hws_vebox", i915_hws_info, 0, (void *)VECS},
- {"i915_rstdby_delays", i915_rstdby_delays, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_delayfreq_table", i915_delayfreq_table, 0},
- {"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_ips_status", i915_ips_status, 0},
{"i915_sr_status", i915_sr_status, 0},
@@ -3823,6 +3933,9 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_pc8_status", i915_pc8_status, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_semaphore_status", i915_semaphore_status, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
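The new semaphore/DPLL/MST entries ride the same drm_info_list machinery as the rest of this file: one seq_file show function plus one table row, registered via drm_debugfs_create_files(). Minimal sketch of adding such a node (names invented):

#include <drm/drmP.h>
#include <linux/seq_file.h>

static int example_info(struct seq_file *m, void *unused)
{
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;

        seq_printf(m, "example state for %s\n", dev->driver->name);
        return 0;
}

static const struct drm_info_list example_debugfs_list[] = {
        {"example_info", example_info, 0},
};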
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index d44344140627..2e7f03ad5ee2 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -138,7 +138,7 @@ static void i915_free_hws(struct drm_device *dev)
I915_WRITE(HWS_PGA, 0x1ffff000);
}
-void i915_kernel_lost_context(struct drm_device * dev)
+void i915_kernel_lost_context(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
@@ -166,7 +166,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
-static int i915_dma_cleanup(struct drm_device * dev)
+static int i915_dma_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
@@ -190,7 +190,7 @@ static int i915_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -235,7 +235,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
return 0;
}
-static int i915_dma_resume(struct drm_device * dev)
+static int i915_dma_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = LP_RING(dev_priv);
@@ -359,7 +359,7 @@ static int validate_cmd(int cmd)
return 0;
}
-static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i, ret;
@@ -369,6 +369,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
+
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
@@ -453,7 +454,7 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
}
-static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+static int i915_dispatch_cmdbuffer(struct drm_device *dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
@@ -487,8 +488,8 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_batchbuffer(struct drm_device * dev,
- drm_i915_batchbuffer_t * batch,
+static int i915_dispatch_batchbuffer(struct drm_device *dev,
+ drm_i915_batchbuffer_t *batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -549,7 +550,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return 0;
}
-static int i915_dispatch_flip(struct drm_device * dev)
+static int i915_dispatch_flip(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
@@ -755,7 +756,7 @@ fail_batch_free:
return ret;
}
-static int i915_emit_irq(struct drm_device * dev)
+static int i915_emit_irq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -781,7 +782,7 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->dri1.counter;
}
-static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
@@ -1266,6 +1267,7 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
{
struct drm_device *dev = pci_get_drvdata(pdev);
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+
if (state == VGA_SWITCHEROO_ON) {
pr_info("switched on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1338,6 +1340,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_gem_stolen;
+ dev_priv->pm._irqs_disabled = false;
+
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
@@ -1375,9 +1379,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
*/
intel_fbdev_initial_config(dev);
- /* Only enable hotplug handling once the fbdev is fully set up. */
- dev_priv->enable_hotplug_processing = true;
-
drm_kms_helper_poll_init(dev);
return 0;
@@ -1425,15 +1426,16 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
}
#if IS_ENABLED(CONFIG_FB)
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
struct pci_dev *pdev = dev_priv->dev->pdev;
bool primary;
+ int ret;
ap = alloc_apertures(1);
if (!ap)
- return;
+ return -ENOMEM;
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end;
@@ -1441,13 +1443,16 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
- remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+ ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
kfree(ap);
+
+ return ret;
}
#else
-static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
+ return 0;
}
#endif
@@ -1492,10 +1497,11 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
+ DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
info->gen,
dev_priv->dev->pdev->device,
+ dev_priv->dev->pdev->revision,
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
@@ -1594,7 +1600,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (dev_priv == NULL)
return -ENOMEM;
- dev->dev_private = (void *)dev_priv;
+ dev->dev_private = dev_priv;
dev_priv->dev = dev;
/* copy initial configuration to dev_priv->info */
@@ -1606,6 +1612,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
+ spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
@@ -1664,7 +1671,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gtt;
}
- i915_kick_out_firmware_fb(dev_priv);
+ ret = i915_kick_out_firmware_fb(dev_priv);
+ if (ret) {
+ DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+ goto out_gtt;
+ }
}
pci_set_master(dev->pdev);
@@ -1717,6 +1728,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_mtrrfree;
}
+ dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->dp_wq == NULL) {
+ DRM_ERROR("Failed to create our dp workqueue.\n");
+ ret = -ENOMEM;
+ goto out_freewq;
+ }
+
intel_irq_init(dev);
intel_uncore_sanitize(dev);
@@ -1792,6 +1810,8 @@ out_gem_unload:
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
pm_qos_remove_request(&dev_priv->pm_qos);
+ destroy_workqueue(dev_priv->dp_wq);
+out_freewq:
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1892,6 +1912,7 @@ int i915_driver_unload(struct drm_device *dev)
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
+ destroy_workqueue(dev_priv->dp_wq);
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
@@ -1933,7 +1954,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 * and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
-void i915_driver_lastclose(struct drm_device * dev)
+void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1954,11 +1975,11 @@ void i915_driver_lastclose(struct drm_device * dev)
i915_dma_cleanup(dev);
}
-void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
mutex_lock(&dev->struct_mutex);
- i915_gem_context_close(dev, file_priv);
- i915_gem_release(dev, file_priv);
+ i915_gem_context_close(dev, file);
+ i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
}
@@ -2031,7 +2052,7 @@ int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
* manage the gtt, we need to claim that all intel devices are agp. For
* otherwise the drm core refuses to initialize the agp support code.
*/
-int i915_driver_device_is_agp(struct drm_device * dev)
+int i915_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 651e65e051c0..6c4b25ce8bb0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -28,6 +28,7 @@
*/
#include <linux/device.h>
+#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -46,8 +47,6 @@ static struct drm_driver driver;
PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
#define GEN_CHV_PIPEOFFSETS \
@@ -55,10 +54,6 @@ static struct drm_driver driver;
CHV_PIPE_C_OFFSET }, \
.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
CHV_TRANSCODER_C_OFFSET, }, \
- .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
- CHV_DPLL_C_OFFSET }, \
- .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
- CHV_DPLL_C_MD_OFFSET }, \
.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
CHV_PALETTE_C_OFFSET }
@@ -308,6 +303,7 @@ static const struct intel_device_info intel_broadwell_d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -319,6 +315,7 @@ static const struct intel_device_info intel_broadwell_m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -330,6 +327,7 @@ static const struct intel_device_info intel_broadwell_gt3d_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -341,6 +339,7 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.has_llc = 1,
.has_ddi = 1,
+ .has_fpga_dbg = 1,
.has_fbc = 1,
GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
@@ -482,10 +481,6 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
if (i915.semaphores >= 0)
return i915.semaphores;
- /* Until we get further testing... */
- if (IS_GEN8(dev))
- return false;
-
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
@@ -499,8 +494,7 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
-
- intel_runtime_pm_get(dev_priv);
+ pci_power_t opregion_target_state;
/* ignore lid events during suspend */
mutex_lock(&dev_priv->modeset_restore_lock);
@@ -526,21 +520,23 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
- drm_irq_uninstall(dev);
- dev_priv->enable_hotplug_processing = false;
-
- intel_disable_gt_powersave(dev);
-
/*
* Disable CRTCs directly since we want to preserve sw state
- * for _thaw.
+ * for _thaw. Also, power gate the CRTC power wells.
*/
drm_modeset_lock_all(dev);
- for_each_crtc(dev, crtc) {
- dev_priv->display.crtc_disable(crtc);
- }
+ for_each_crtc(dev, crtc)
+ intel_crtc_control(crtc, false);
drm_modeset_unlock_all(dev);
+ intel_dp_mst_suspend(dev);
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ intel_runtime_pm_disable_interrupts(dev);
+
+ intel_suspend_gt_powersave(dev);
+
intel_modeset_suspend_hw(dev);
}
@@ -548,8 +544,15 @@ static int i915_drm_freeze(struct drm_device *dev)
i915_save_state(dev);
+ opregion_target_state = PCI_D3cold;
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ opregion_target_state = PCI_D1;
+#endif
+ intel_opregion_notify_adapter(dev, opregion_target_state);
+
+ intel_uncore_forcewake_reset(dev, false);
intel_opregion_fini(dev);
- intel_uncore_fini(dev);
console_lock();
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
@@ -557,6 +560,8 @@ static int i915_drm_freeze(struct drm_device *dev)
dev_priv->suspend_count++;
+ intel_display_set_init_power(dev_priv, false);
+
return 0;
}
@@ -606,7 +611,10 @@ static int i915_drm_thaw_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_uncore_early_sanitize(dev);
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ hsw_disable_pc8(dev_priv);
+
+ intel_uncore_early_sanitize(dev, true);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
@@ -639,11 +647,19 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
}
mutex_unlock(&dev->struct_mutex);
- /* We need working interrupts for modeset enabling ... */
- drm_irq_install(dev, dev->pdev->irq);
+ intel_runtime_pm_restore_interrupts(dev);
intel_modeset_init_hw(dev);
+ {
+ unsigned long irqflags;
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->display.hpd_irq_setup)
+ dev_priv->display.hpd_irq_setup(dev);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ intel_dp_mst_resume(dev);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
@@ -655,7 +671,6 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
* notifications.
* */
intel_hpd_init(dev);
- dev_priv->enable_hotplug_processing = true;
/* Config may have changed between suspend and resume */
drm_helper_hpd_irq_event(dev);
}
@@ -678,7 +693,8 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_opregion_notify_adapter(dev, PCI_D0);
+
return 0;
}
@@ -887,6 +903,7 @@ static int i915_pm_suspend_late(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_i915_private *dev_priv = drm_dev->dev_private;
/*
	 * We have a suspend ordering issue with the snd-hda driver also
@@ -900,6 +917,9 @@ static int i915_pm_suspend_late(struct device *dev)
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
+ hsw_enable_pc8(dev_priv);
+
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1f7700897dfc..a26eec285da7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20080730"
+#define DRIVER_DATE "20140620"
enum pipe {
INVALID_PIPE = -1,
@@ -129,6 +129,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_OTHER,
POWER_DOMAIN_VGA,
POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
@@ -178,14 +179,20 @@ enum hpd_pin {
list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
if ((intel_connector)->base.encoder == (__encoder))
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ if ((1 << (domain)) & (mask))
+
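
The for_each_power_domain() iterator added above visits every domain whose bit is set in a mask. A minimal sketch of a caller, purely for illustration (the helper and example mask are hypothetical, not part of this patch):

	/* Hypothetical helper: count how many display power domains a
	 * mask references, using the new iterator. Not in this patch. */
	static int count_power_domains(unsigned long mask)
	{
		enum intel_display_power_domain domain;
		int count = 0;

		for_each_power_domain(domain, mask)
			count++;

		return count;
	}

	/* e.g. count_power_domains((1 << POWER_DOMAIN_VGA) |
	 *			     (1 << POWER_DOMAIN_AUDIO)) == 2 */
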
struct drm_i915_private;
struct i915_mmu_object;
enum intel_dpll_id {
DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
/* real shared dpll ids must be >= 0 */
- DPLL_ID_PCH_PLL_A,
- DPLL_ID_PCH_PLL_B,
+ DPLL_ID_PCH_PLL_A = 0,
+ DPLL_ID_PCH_PLL_B = 1,
+ DPLL_ID_WRPLL1 = 0,
+ DPLL_ID_WRPLL2 = 1,
};
#define I915_NUM_PLLS 2
@@ -194,6 +201,7 @@ struct intel_dpll_hw_state {
uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
+ uint32_t wrpll;
};
struct intel_shared_dpll {
@@ -204,6 +212,8 @@ struct intel_shared_dpll {
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
+ /* The mode_set hook is optional and should be used together with the
+ * intel_prepare_shared_dpll function. */
void (*mode_set)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
@@ -228,12 +238,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
-struct intel_ddi_plls {
- int spll_refcount;
- int wrpll1_refcount;
- int wrpll2_refcount;
-};
-
/* Interface history:
*
* 1.1: Original.
@@ -324,6 +328,7 @@ struct drm_i915_error_state {
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
+ struct drm_i915_error_object *semaphore_obj;
struct drm_i915_error_ring {
bool valid;
@@ -435,8 +440,8 @@ struct drm_i915_display_funcs {
void (*update_wm)(struct drm_crtc *crtc);
void (*update_sprite_wm)(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enable, bool scaled);
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -552,8 +557,6 @@ struct intel_device_info {
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int dpll_offsets[I915_MAX_PIPES];
- int dpll_md_offsets[I915_MAX_PIPES];
int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
};
@@ -586,28 +589,48 @@ struct i915_ctx_hang_stats {
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_ID 0
+#define DEFAULT_CONTEXT_HANDLE 0
+/**
+ * struct intel_context - as the name implies, represents a context.
+ * @ref: reference count.
+ * @user_handle: userspace tracking identity for this context.
+ * @remap_slice: l3 row remapping information.
+ * @file_priv: filp associated with this context (NULL for global default
+ * context).
+ * @hang_stats: information about the role of this context in possible GPU
+ * hangs.
+ * @vm: virtual memory space used by this context.
+ * @legacy_hw_ctx: render context backing object and whether it is correctly
+ * initialized (legacy ring submission mechanism only).
+ * @link: link in the global list of contexts.
+ *
+ * Contexts are memory images used by the hardware to store copies of their
+ * internal state.
+ */
struct intel_context {
struct kref ref;
- int id;
- bool is_initialized;
+ int user_handle;
uint8_t remap_slice;
struct drm_i915_file_private *file_priv;
- struct intel_engine_cs *last_ring;
- struct drm_i915_gem_object *obj;
struct i915_ctx_hang_stats hang_stats;
struct i915_address_space *vm;
+ struct {
+ struct drm_i915_gem_object *rcs_state;
+ bool initialized;
+ } legacy_hw_ctx;
+
struct list_head link;
};
struct i915_fbc {
unsigned long size;
+ unsigned threshold;
unsigned int fb_id;
enum plane plane;
int y;
- struct drm_mm_node *compressed_fb;
+ struct drm_mm_node compressed_fb;
struct drm_mm_node *compressed_llb;
struct intel_fbc_work {
@@ -635,9 +658,15 @@ struct i915_drrs {
struct intel_connector *connector;
};
+struct intel_dp;
struct i915_psr {
+ struct mutex lock;
bool sink_support;
bool source_ok;
+ struct intel_dp *enabled;
+ bool active;
+ struct delayed_work work;
+ unsigned busy_frontbuffer_bits;
};
enum intel_pch {
@@ -880,6 +909,12 @@ struct vlv_s0ix_state {
u32 clock_gate_dis2;
};
+struct intel_rps_ei {
+ u32 cz_clock;
+ u32 render_c0;
+ u32 media_c0;
+};
+
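
The intel_rps_ei snapshots back the "manual wa residency calculations" noted in the struct below: over an evaluation interval, busyness is the delta of a C0 counter divided by the delta of the CZ clock. A rough sketch under that assumption (the helper name is ours; counter wrap and overflow handling are elided):

	/* Hypothetical: percent of an evaluation interval the render
	 * engine spent in C0, from two intel_rps_ei snapshots. Assumes
	 * both counters tick at comparable rates; wrap handling elided. */
	static u32 render_c0_residency_pct(const struct intel_rps_ei *prev,
					   const struct intel_rps_ei *now)
	{
		u32 clock = now->cz_clock - prev->cz_clock;
		u32 busy = now->render_c0 - prev->render_c0;

		if (clock == 0)
			return 0;

		return busy * 100 / clock;
	}
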
struct intel_gen6_power_mgmt {
/* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
@@ -903,6 +938,9 @@ struct intel_gen6_power_mgmt {
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
u8 rp1_freq; /* "less than" RP0 power/freqency */
u8 rp0_freq; /* Non-overclocked max frequency. */
+ u32 cz_freq;
+
+ u32 ei_interrupt_count;
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
@@ -910,6 +948,9 @@ struct intel_gen6_power_mgmt {
bool enabled;
struct delayed_work delayed_resume_work;
+ /* manual wa residency calculations */
+ struct intel_rps_ei up_ei, down_ei;
+
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
@@ -1230,6 +1271,7 @@ struct intel_vbt_data {
u16 pwm_freq_hz;
bool present;
bool active_low_pwm;
+ u8 min_brightness; /* min_brightness/255 of max */
} backlight;
/* MIPI DSI */
@@ -1299,7 +1341,7 @@ struct ilk_wm_values {
*/
struct i915_runtime_pm {
bool suspended;
- bool irqs_disabled;
+ bool _irqs_disabled;
};
enum intel_pipe_crc_source {
@@ -1332,6 +1374,17 @@ struct intel_pipe_crc {
wait_queue_head_t wq;
};
+struct i915_frontbuffer_tracking {
+ struct mutex lock;
+
+ /*
+	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
+ * scheduled flips.
+ */
+ unsigned busy_bits;
+ unsigned flip_bits;
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1363,6 +1416,7 @@ struct drm_i915_private {
struct pci_dev *bridge_dev;
struct intel_engine_cs ring[I915_NUM_RINGS];
+ struct drm_i915_gem_object *semaphore_obj;
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
@@ -1371,6 +1425,9 @@ struct drm_i915_private {
/* protects the irq masks */
spinlock_t irq_lock;
+ /* protects the mmio flip data */
+ spinlock_t mmio_flip_lock;
+
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
@@ -1390,7 +1447,6 @@ struct drm_i915_private {
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct work_struct hotplug_work;
- bool enable_hotplug_processing;
struct {
unsigned long hpd_last_jiffies;
int hpd_cnt;
@@ -1467,7 +1523,6 @@ struct drm_i915_private {
int num_shared_dpll;
struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- struct intel_ddi_plls ddi_plls;
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
/* Reclocking support */
@@ -1475,6 +1530,9 @@ struct drm_i915_private {
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
+
+ struct i915_frontbuffer_tracking fb_tracking;
+
u16 orig_clock;
bool mchbar_need_disable;
@@ -1541,6 +1599,20 @@ struct drm_i915_private {
struct i915_runtime_pm pm;
+ struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+ u32 long_hpd_port_mask;
+ u32 short_hpd_port_mask;
+ struct work_struct dig_port_work;
+
+ /*
+	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
+	 * non-DP HPD could block the workqueue on a mode config mutex
+	 * that userspace may have taken. However, userspace is waiting
+	 * on the DP workqueue to run, which is blocked behind the
+	 * non-DP one.
+ */
+ struct workqueue_struct *dp_wq;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
@@ -1592,6 +1664,28 @@ struct drm_i915_gem_object_ops {
void (*release)(struct drm_i915_gem_object *);
};
+/*
+ * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+ * considered to be the frontbuffer for the given plane interface-wise. This
+ * doesn't mean that the hw necessarily already scans it out, but that any
+ * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+ *
+ * We have one bit per pipe and per scanout plane type.
+ */
+#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+#define INTEL_FRONTBUFFER_BITS \
+ (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+ (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+#define INTEL_FRONTBUFFER_CURSOR(pipe) \
+	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_SPRITE(pipe) \
+	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+ (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+ (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+
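
With four bits per pipe, the frontbuffer bit for any plane is a fixed function of the pipe index. Worked values for pipe 1 (pipe B), derived directly from the macros above:

	/* Worked example for pipe == 1 (four bits per pipe):
	 *   INTEL_FRONTBUFFER_PRIMARY(1)  == 1 << 4   == 0x010
	 *   INTEL_FRONTBUFFER_CURSOR(1)   == 1 << 5   == 0x020
	 *   INTEL_FRONTBUFFER_SPRITE(1)   == 1 << 6   == 0x040
	 *   INTEL_FRONTBUFFER_OVERLAY(1)  == 1 << 7   == 0x080
	 *   INTEL_FRONTBUFFER_ALL_MASK(1) == 0xf << 4 == 0x0f0
	 */
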
struct drm_i915_gem_object {
struct drm_gem_object base;
@@ -1662,6 +1756,12 @@ struct drm_i915_gem_object {
unsigned int pin_display:1;
/*
+ * Is the object to be mapped as read-only to the GPU
+ * Only honoured if hardware has relevant pte bit
+ */
+ unsigned long gt_ro:1;
+
+ /*
* Is the GPU currently using a fence to access this buffer,
*/
unsigned int pending_fenced_gpu_access:1;
@@ -1673,6 +1773,8 @@ struct drm_i915_gem_object {
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
+ unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+
struct sg_table *pages;
int pages_pin_count;
@@ -1719,6 +1821,10 @@ struct drm_i915_gem_object {
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits);
+
/**
* Request queue structure.
*
@@ -1940,10 +2046,8 @@ struct drm_i915_cmd_table {
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
-#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && \
- (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
-#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 \
- && !IS_GEN8(dev))
+#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
+#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
#define USES_PPGTT(dev) intel_enable_ppgtt(dev, false)
#define USES_FULL_PPGTT(dev) intel_enable_ppgtt(dev, true)
@@ -1998,6 +2102,8 @@ struct drm_i915_cmd_table {
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
+#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
+
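
HAS_GMCH_DISPLAY() is what the watermark debugfs open functions earlier in this series now key off: GMCH-style display means gen4 and earlier, plus Valleyview. A quick truth table, our reading of the macro for illustration:

	/* Illustration of HAS_GMCH_DISPLAY(dev):
	 *   gen 3 (e.g. i915g)  -> true  (gen < 5)
	 *   gen 7 Valleyview    -> true  (IS_VALLEYVIEW)
	 *   gen 7 Ivybridge     -> false
	 */
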
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@ -2040,6 +2146,8 @@ struct i915_params {
bool reset;
bool disable_display;
bool disable_vtd_wa;
+ int use_mmio_flip;
+ bool mmio_debug;
};
extern struct i915_params i915 __read_mostly;
@@ -2048,12 +2156,12 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
-extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
- struct drm_file *file_priv);
+ struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -2084,10 +2192,12 @@ extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
-extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev,
+ bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
+extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -2235,6 +2345,8 @@ bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
+int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
@@ -2404,7 +2516,7 @@ static inline void i915_gem_context_unreference(struct intel_context *ctx)
static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
- return c->id == DEFAULT_CONTEXT_ID;
+ return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2435,7 +2547,7 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
@@ -2445,7 +2557,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 stolen_offset,
u32 gtt_offset,
u32 size);
-void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -2593,8 +2704,8 @@ extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
-extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
-extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+ bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
@@ -2605,6 +2716,8 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
+void intel_notify_mmio_flip(struct intel_engine_cs *ring);
+
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -2700,10 +2813,10 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
- if (HAS_PCH_SPLIT(dev))
- return CPU_VGACNTRL;
- else if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
+ else if (INTEL_INFO(dev)->gen >= 5)
+ return CPU_VGACNTRL;
else
return VGACNTRL;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f247d922e44a..215185050ff1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1095,7 +1095,7 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
-static int
+int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
int ret;
@@ -1161,14 +1161,14 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
s64 before, now;
int ret;
- WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+ WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
- if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+ if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
gen6_rps_boost(dev_priv);
if (file_priv)
mod_delayed_work(dev_priv->wq,
@@ -1560,14 +1560,29 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret)
goto unpin;
- obj->fault_mappable = true;
-
+ /* Finally, remap it using the new GTT offset */
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
pfn >>= PAGE_SHIFT;
- pfn += page_offset;
- /* Finally, remap it using the new GTT offset */
- ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+ if (!obj->fault_mappable) {
+ unsigned long size = min_t(unsigned long,
+ vma->vm_end - vma->vm_start,
+ obj->base.size);
+ int i;
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
+
+ obj->fault_mappable = true;
+ } else
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vmf->virtual_address,
+ pfn + page_offset);
unpin:
i915_gem_object_ggtt_unpin(obj);
unlock:
@@ -2051,16 +2066,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
*/
- gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
- gfp |= __GFP_IO | __GFP_WAIT;
-
i915_gem_shrink_all(dev_priv);
- page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto err_pages;
-
- gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
- gfp &= ~(__GFP_IO | __GFP_WAIT);
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -2209,6 +2218,8 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
list_move_tail(&vma->mm_list, &vm->inactive_list);
}
+ intel_fb_obj_flush(obj, true);
+
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2318,7 +2329,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 request_ring_position, request_start;
int ret;
- request_start = intel_ring_get_tail(ring);
+ request_start = intel_ring_get_tail(ring->buffer);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2339,7 +2350,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
* GPU processing the request, we never over-estimate the
* position of the head.
*/
- request_ring_position = intel_ring_get_tail(ring);
+ request_ring_position = intel_ring_get_tail(ring->buffer);
ret = ring->add_request(ring);
if (ret)
@@ -2822,6 +2833,8 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
idx = intel_ring_sync_index(from, to);
seqno = obj->last_read_seqno;
+	/* Optimization: avoid semaphore sync when we are sure we have
+	 * already waited for an object with a higher seqno. */
if (seqno <= from->semaphore.sync_seqno[idx])
return 0;
@@ -2905,8 +2918,6 @@ int i915_vma_unbind(struct i915_vma *vma)
vma->unbind_vma(vma);
- i915_gem_gtt_finish_object(obj);
-
list_del_init(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
if (i915_is_ggtt(vma->vm))
@@ -2917,8 +2928,10 @@ int i915_vma_unbind(struct i915_vma *vma)
/* Since the unbound list is global, only move to that list if
* no more VMAs exist. */
- if (list_empty(&obj->vma_list))
+ if (list_empty(&obj->vma_list)) {
+ i915_gem_gtt_finish_object(obj);
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ }
/* And finally now the object is completely decoupled from this vma,
* we can drop its hold on the backing storage and allow it to be
@@ -3530,6 +3543,8 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3551,6 +3566,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
+ intel_fb_obj_flush(obj, false);
+
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
@@ -3604,6 +3621,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->dirty = 1;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -3940,6 +3960,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
+ if (write)
+ intel_fb_obj_invalidate(obj, NULL);
+
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
@@ -4428,13 +4451,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->stolen)
i915_gem_object_unpin_pages(obj);
+ WARN_ON(obj->frontbuffer_bits);
+
if (WARN_ON(obj->pages_pin_count))
obj->pages_pin_count = 0;
if (discard_backing_storage(obj))
obj->madv = I915_MADV_DONTNEED;
i915_gem_object_put_pages(obj);
i915_gem_object_free_mmap_offset(obj);
- i915_gem_object_release_stolen(obj);
BUG_ON(obj->pages);
@@ -4912,6 +4936,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
register_oom_notifier(&dev_priv->mm.oom_notifier);
+
+ mutex_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@ -4973,6 +4999,23 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
return ret;
}
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+ struct drm_i915_gem_object *new,
+ unsigned frontbuffer_bits)
+{
+ if (old) {
+ WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+ WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+ old->frontbuffer_bits &= ~frontbuffer_bits;
+ }
+
+ if (new) {
+ WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+ WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+ new->frontbuffer_bits |= frontbuffer_bits;
+ }
+}
+
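
A sketch of how a flip path might use i915_gem_track_fb() to move frontbuffer status from the outgoing scanout object to the incoming one. The wrapper below is hypothetical; the actual call sites live in the display code, not in this hunk:

	/* Hypothetical wrapper: transfer the primary-plane frontbuffer
	 * bit for a pipe from old_obj to new_obj during a flip. Caller
	 * must hold struct_mutex, as the WARN_ONs above verify. */
	static void example_track_flip(struct drm_i915_gem_object *old_obj,
				       struct drm_i915_gem_object *new_obj,
				       enum pipe pipe)
	{
		i915_gem_track_fb(old_obj, new_obj,
				  INTEL_FRONTBUFFER_PRIMARY(pipe));
	}
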
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
if (!mutex_is_locked(mutex))
@@ -5055,12 +5098,13 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
vm == &dev_priv->mm.aliasing_ppgtt->base)
vm = &dev_priv->gtt.base;
- BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (vma->vm == vm)
return vma->node.start;
}
+ WARN(1, "%s vma for this object not found.\n",
+ i915_is_ggtt(vm) ? "global" : "ppgtt");
return -1;
}
@@ -5141,8 +5185,11 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
bool was_interruptible;
bool unlock;
- while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+ while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
schedule_timeout_killable(1);
+ if (fatal_signal_pending(current))
+ return NOTIFY_DONE;
+ }
if (timeout == 0) {
		pr_err("Unable to purge GPU memory due to lock contention.\n");
return NOTIFY_DONE;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5ddf3bce9c3..3b99390e467a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -182,22 +182,50 @@ void i915_gem_context_free(struct kref *ctx_ref)
typeof(*ctx), ref);
struct i915_hw_ppgtt *ppgtt = NULL;
- if (ctx->obj) {
+ if (ctx->legacy_hw_ctx.rcs_state) {
/* We refcount even the aliasing PPGTT to keep the code symmetric */
- if (USES_PPGTT(ctx->obj->base.dev))
+ if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
ppgtt = ctx_to_ppgtt(ctx);
-
- /* XXX: Free up the object before tearing down the address space, in
- * case we're bound in the PPGTT */
- drm_gem_object_unreference(&ctx->obj->base);
}
if (ppgtt)
kref_put(&ppgtt->ref, ppgtt_release);
+ if (ctx->legacy_hw_ctx.rcs_state)
+ drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
list_del(&ctx->link);
kfree(ctx);
}
+static struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ obj = i915_gem_alloc_object(dev, size);
+ if (obj == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ */
+ if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ /* Failure shouldn't ever happen this early */
+ if (WARN_ON(ret)) {
+ drm_gem_object_unreference(&obj->base);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return obj;
+}
+
static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
@@ -234,40 +262,26 @@ __create_hw_context(struct drm_device *dev,
list_add_tail(&ctx->link, &dev_priv->context_list);
if (dev_priv->hw_context_size) {
- ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
- if (ctx->obj == NULL) {
- ret = -ENOMEM;
+ struct drm_i915_gem_object *obj =
+ i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
goto err_out;
}
-
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- */
- if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
- ret = i915_gem_object_set_cache_level(ctx->obj,
- I915_CACHE_L3_LLC);
- /* Failure shouldn't ever happen this early */
- if (WARN_ON(ret))
- goto err_out;
- }
+ ctx->legacy_hw_ctx.rcs_state = obj;
}
/* Default context will never have a file_priv */
if (file_priv != NULL) {
ret = idr_alloc(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_ID, 0, GFP_KERNEL);
+ DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
if (ret < 0)
goto err_out;
} else
- ret = DEFAULT_CONTEXT_ID;
+ ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
- ctx->id = ret;
+ ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
@@ -301,7 +315,7 @@ i915_gem_create_context(struct drm_device *dev,
if (IS_ERR(ctx))
return ctx;
- if (is_global_default_ctx && ctx->obj) {
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -309,7 +323,7 @@ i915_gem_create_context(struct drm_device *dev,
* be available. To avoid this we always pin the default
* context.
*/
- ret = i915_gem_obj_ggtt_pin(ctx->obj,
+ ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +363,8 @@ i915_gem_create_context(struct drm_device *dev,
return ctx;
err_unpin:
- if (is_global_default_ctx && ctx->obj)
- i915_gem_object_ggtt_unpin(ctx->obj);
+ if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state)
+ i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
err_destroy:
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
@@ -366,23 +380,27 @@ void i915_gem_context_reset(struct drm_device *dev)
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_engine_cs *ring = &dev_priv->ring[i];
struct intel_context *dctx = ring->default_context;
+ struct intel_context *lctx = ring->last_context;
/* Do a fake switch to the default context */
- if (ring->last_context == dctx)
+ if (lctx == dctx)
continue;
- if (!ring->last_context)
+ if (!lctx)
continue;
- if (dctx->obj && i == RCS) {
- WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+ if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
+ WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
get_context_alignment(dev), 0));
/* Fake a finish/inactive */
- dctx->obj->base.write_domain = 0;
- dctx->obj->active = 0;
+ dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
+ dctx->legacy_hw_ctx.rcs_state->active = 0;
}
- i915_gem_context_unreference(ring->last_context);
+ if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+ i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
+
+ i915_gem_context_unreference(lctx);
i915_gem_context_reference(dctx);
ring->last_context = dctx;
}
@@ -429,7 +447,7 @@ void i915_gem_context_fini(struct drm_device *dev)
struct intel_context *dctx = dev_priv->ring[RCS].default_context;
int i;
- if (dctx->obj) {
+ if (dctx->legacy_hw_ctx.rcs_state) {
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
@@ -444,13 +462,13 @@ void i915_gem_context_fini(struct drm_device *dev)
WARN_ON(!dev_priv->ring[RCS].last_context);
if (dev_priv->ring[RCS].last_context == dctx) {
/* Fake switch to NULL context */
- WARN_ON(dctx->obj->active);
- i915_gem_object_ggtt_unpin(dctx->obj);
+ WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(dctx);
dev_priv->ring[RCS].last_context = NULL;
}
- i915_gem_object_ggtt_unpin(dctx->obj);
+ i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
}
for (i = 0; i < I915_NUM_RINGS; i++) {
@@ -570,7 +588,7 @@ mi_set_context(struct intel_engine_cs *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -602,16 +620,16 @@ static int do_switch(struct intel_engine_cs *ring,
int ret, i;
if (from != NULL && ring == &dev_priv->ring[RCS]) {
- BUG_ON(from->obj == NULL);
- BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+ BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
+ BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
- if (from == to && from->last_ring == ring && !to->remap_slice)
+ if (from == to && !to->remap_slice)
return 0;
/* Trying to pin first makes error handling easier. */
if (ring == &dev_priv->ring[RCS]) {
- ret = i915_gem_obj_ggtt_pin(to->obj,
+ ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(ring->dev), 0);
if (ret)
return ret;
@@ -644,17 +662,17 @@ static int do_switch(struct intel_engine_cs *ring,
*
* XXX: We need a real interface to do this instead of trickery.
*/
- ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false);
if (ret)
goto unpin_out;
- if (!to->obj->has_global_gtt_mapping) {
- struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+ if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
- vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+ vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}
- if (!to->is_initialized || i915_gem_context_is_default(to))
+ if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
hw_flags |= MI_RESTORE_INHIBIT;
ret = mi_set_context(ring, to, hw_flags);
@@ -680,8 +698,8 @@ static int do_switch(struct intel_engine_cs *ring,
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+ from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -689,21 +707,20 @@ static int do_switch(struct intel_engine_cs *ring,
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->obj->dirty = 1;
- BUG_ON(from->obj->ring != ring);
+ from->legacy_hw_ctx.rcs_state->dirty = 1;
+ BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->obj);
+ i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
- uninitialized = !to->is_initialized && from == NULL;
- to->is_initialized = true;
+ uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+ to->legacy_hw_ctx.initialized = true;
done:
i915_gem_context_reference(to);
ring->last_context = to;
- to->last_ring = ring;
if (uninitialized) {
ret = i915_gem_render_state_init(ring);
@@ -715,7 +732,7 @@ done:
unpin_out:
if (ring->id == RCS)
- i915_gem_object_ggtt_unpin(to->obj);
+ i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
@@ -736,7 +753,7 @@ int i915_switch_context(struct intel_engine_cs *ring,
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- if (to->obj == NULL) { /* We have the fake context */
+ if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (to != ring->last_context) {
i915_gem_context_reference(to);
if (ring->last_context)
@@ -774,7 +791,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- args->ctx_id = ctx->id;
+ args->ctx_id = ctx->user_handle;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
return 0;
@@ -788,7 +805,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct intel_context *ctx;
int ret;
- if (args->ctx_id == DEFAULT_CONTEXT_ID)
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
return -ENOENT;
ret = i915_mutex_lock_interruptible(dev);
@@ -801,7 +818,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(ctx);
}
- idr_remove(&ctx->file_priv->context_idr, ctx->id);
+ idr_remove(&ctx->file_priv->context_idr, ctx->user_handle);
i915_gem_context_unreference(ctx);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a30133f93e8..60998fc4e5b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -938,7 +938,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_context *ctx = NULL;
struct i915_ctx_hang_stats *hs;
- if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
+ if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
return ERR_PTR(-EINVAL);
ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -975,10 +975,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
- /* check for potential scanout */
- if (i915_gem_obj_ggtt_bound(obj) &&
- i915_gem_obj_to_ggtt(obj)->pin_count)
- intel_mark_fb_busy(obj, ring);
+
+ intel_fb_obj_invalidate(obj, ring);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1028,6 +1026,163 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}
+static int
+legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+ struct intel_engine_cs *ring,
+ struct intel_context *ctx,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct list_head *vmas,
+ struct drm_i915_gem_object *batch_obj,
+ u64 exec_start, u32 flags)
+{
+ struct drm_clip_rect *cliprects = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 exec_len;
+ int instp_mode;
+ u32 instp_mask;
+ int i, ret = 0;
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ if (INTEL_INFO(dev)->gen >= 5) {
+ DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
+ DRM_DEBUG("execbuf with %u cliprects\n",
+ args->num_cliprects);
+ return -EINVAL;
+ }
+
+ cliprects = kcalloc(args->num_cliprects,
+ sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (copy_from_user(cliprects,
+ to_user_ptr(args->cliprects_ptr),
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto error;
+ }
+ } else {
+ if (args->DR4 == 0xffffffff) {
+ DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
+ args->DR4 = 0;
+ }
+
+ if (args->DR1 || args->DR4 || args->cliprects_ptr) {
+ DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
+ return -EINVAL;
+ }
+ }
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ if (ret)
+ goto error;
+
+ ret = i915_switch_context(ring, ctx);
+ if (ret)
+ goto error;
+
+ instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ instp_mask = I915_EXEC_CONSTANTS_MASK;
+ switch (instp_mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+ DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (instp_mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4) {
+ DRM_DEBUG("no rel constants on pre-gen4\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
+ DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* The HW changed the meaning on this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
+ }
+ break;
+ default:
+ DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (ring == &dev_priv->ring[RCS] &&
+ instp_mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto error;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = instp_mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto error;
+ }
+
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto error;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ goto error;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len,
+ flags);
+ if (ret)
+ return ret;
+ }
+
+ trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+
+ i915_gem_execbuffer_move_to_active(vmas, ring);
+ i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+
+error:
+ kfree(cliprects);
+ return ret;
+}
+
/**
* Find one BSD ring to dispatch the corresponding BSD command.
* The Ring ID is returned.
@@ -1087,14 +1242,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct eb_vmas *eb;
struct drm_i915_gem_object *batch_obj;
- struct drm_clip_rect *cliprects = NULL;
struct intel_engine_cs *ring;
struct intel_context *ctx;
struct i915_address_space *vm;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
- u64 exec_start = args->batch_start_offset, exec_len;
- u32 mask, flags;
- int ret, mode, i;
+ u64 exec_start = args->batch_start_offset;
+ u32 flags;
+ int ret;
bool need_relocs;
if (!i915_gem_check_execbuffer(args))
@@ -1138,87 +1292,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
return -EINVAL;
}
- mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- mask = I915_EXEC_CONSTANTS_MASK;
- switch (mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (mode != 0 && ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen > 5 &&
- mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev)->gen >= 6)
- mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
- return -EINVAL;
- }
-
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- if (args->num_cliprects != 0) {
- if (ring != &dev_priv->ring[RCS]) {
- DRM_DEBUG("clip rectangles are only valid with the render ring\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev)->gen >= 5) {
- DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
- return -EINVAL;
- }
-
- if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
- DRM_DEBUG("execbuf with %u cliprects\n",
- args->num_cliprects);
- return -EINVAL;
- }
-
- cliprects = kcalloc(args->num_cliprects,
- sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (copy_from_user(cliprects,
- to_user_ptr(args->cliprects_ptr),
- sizeof(*cliprects)*args->num_cliprects)) {
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- } else {
- if (args->DR4 == 0xffffffff) {
- DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
- args->DR4 = 0;
- }
-
- if (args->DR1 || args->DR4 || args->cliprects_ptr) {
- DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
- return -EINVAL;
- }
- }
-
intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
@@ -1322,63 +1400,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
else
exec_start += i915_gem_obj_offset(batch_obj, vm);
- ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
+ ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
+ args, &eb->vmas, batch_obj, exec_start, flags);
if (ret)
goto err;
- ret = i915_switch_context(ring, ctx);
- if (ret)
- goto err;
-
- if (ring == &dev_priv->ring[RCS] &&
- mode != dev_priv->relative_constants_mode) {
- ret = intel_ring_begin(ring, 4);
- if (ret)
- goto err;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring, mask << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
- }
-
- if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
- ret = i915_reset_gen7_sol_offsets(dev, ring);
- if (ret)
- goto err;
- }
-
-
- exec_len = args->batch_len;
- if (cliprects) {
- for (i = 0; i < args->num_cliprects; i++) {
- ret = i915_emit_box(dev, &cliprects[i],
- args->DR1, args->DR4);
- if (ret)
- goto err;
-
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
- } else {
- ret = ring->dispatch_execbuffer(ring,
- exec_start, exec_len,
- flags);
- if (ret)
- goto err;
- }
-
- trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
-
- i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-
err:
/* the request owns the ref now */
i915_gem_context_unreference(ctx);
@@ -1387,8 +1413,6 @@ err:
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:
- kfree(cliprects);
-
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
@@ -1525,7 +1549,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
- struct drm_i915_gem_exec_object2 *user_exec_list =
+ struct drm_i915_gem_exec_object2 __user *user_exec_list =
to_user_ptr(args->buffers_ptr);
int i;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8b3cde703364..5188936bca0a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -63,6 +63,13 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
}
#endif
+ /* Early VLV doesn't have this */
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
+ dev->pdev->revision < 0xb) {
+ DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
+ return 0;
+ }
+
return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
}
@@ -110,7 +117,7 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -132,7 +139,7 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -156,7 +163,7 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 flags)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -164,7 +171,8 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
/* Mark the page as writeable. Other platforms don't have a
* setting for read-only/writable, so this matches that behavior.
*/
- pte |= BYT_PTE_WRITEABLE;
+ if (!(flags & PTE_READ_ONLY))
+ pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
@@ -174,7 +182,7 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -187,7 +195,7 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
- bool valid)
+ bool valid, u32 unused)
{
gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -301,7 +309,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 unused)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -639,7 +647,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
uint32_t pd_entry;
int pte, pde;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
@@ -941,7 +949,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -964,7 +972,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
uint64_t start,
- enum i915_cache_level cache_level)
+ enum i915_cache_level cache_level, u32 flags)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
@@ -981,7 +989,8 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
- cache_level, true);
+ cache_level, true, flags);
+
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
pt_vaddr = NULL;
@@ -1218,8 +1227,12 @@ ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ /* Currently applicable only to VLV */
+ if (vma->obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
- cache_level);
+ cache_level, flags);
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
@@ -1394,7 +1407,7 @@ static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 unused)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1440,7 +1453,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level level)
+ enum i915_cache_level level, u32 flags)
{
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
@@ -1452,7 +1465,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
i++;
}
@@ -1464,7 +1477,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) !=
- vm->pte_encode(addr, level, true));
+ vm->pte_encode(addr, level, true, flags));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -1518,7 +1531,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -1567,6 +1580,10 @@ static void ggtt_bind_vma(struct i915_vma *vma,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ /* Currently applicable only to VLV */
+ if (obj->gt_ro)
+ flags |= PTE_READ_ONLY;
+
/* If there is no aliasing PPGTT, or the caller needs a global mapping,
* or we have a global mapping already but the cacheability flags have
* changed, set the global PTEs.
@@ -1583,7 +1600,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
(cache_level != obj->cache_level)) {
vma->vm->insert_entries(vma->vm, obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
obj->has_global_gtt_mapping = 1;
}
}
@@ -1595,7 +1612,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
appgtt->base.insert_entries(&appgtt->base,
vma->obj->pages,
vma->node.start,
- cache_level);
+ cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
}
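[Aside, not part of the patch: the point of threading the new `u32 flags` argument through insert_entries() and pte_encode() is that bind paths can now pass PTE_READ_ONLY down to the encoder, so byt_pte_encode() can omit the writeable bit for gt_ro objects. A rough userspace model of that shape; the MODEL_* bit values are made up for illustration.]

#include <stdint.h>
#include <stdio.h>

#define MODEL_PTE_VALID      (1u << 0)	/* stands in for GEN6_PTE_VALID */
#define MODEL_PTE_WRITEABLE  (1u << 1)	/* stands in for BYT_PTE_WRITEABLE */
#define MODEL_READ_ONLY      (1u << 1)	/* stands in for PTE_READ_ONLY */

/* Shape of the reworked byt_pte_encode(): the mapping is writeable
 * unless the bind flags ask for a read-only PTE. */
static uint32_t pte_encode(uint64_t addr, int valid, uint32_t flags)
{
	uint32_t pte = valid ? MODEL_PTE_VALID : 0;

	pte |= (uint32_t)(addr & 0xfffff000u);
	if (!(flags & MODEL_READ_ONLY))
		pte |= MODEL_PTE_WRITEABLE;
	return pte;
}

int main(void)
{
	printf("rw pte: 0x%08x\n", pte_encode(0x10000, 1, 0));
	printf("ro pte: 0x%08x\n", pte_encode(0x10000, 1, MODEL_READ_ONLY));
	return 0;
}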
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 1b96a06be3cb..8d6f7c18c404 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -154,6 +154,7 @@ struct i915_vma {
void (*unbind_vma)(struct i915_vma *vma);
/* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
+#define PTE_READ_ONLY (1<<1)
void (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
@@ -197,7 +198,7 @@ struct i915_address_space {
/* FIXME: Need a more generic return type */
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
- bool valid); /* Create a valid PTE */
+ bool valid, u32 flags); /* Create a valid PTE */
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -205,7 +206,7 @@ struct i915_address_space {
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
uint64_t start,
- enum i915_cache_level cache_level);
+ enum i915_cache_level cache_level, u32 flags);
void (*cleanup)(struct i915_address_space *vm);
};
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 34894b573064..e60be3f552a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,64 +28,13 @@
#include "i915_drv.h"
#include "intel_renderstate.h"
-struct i915_render_state {
+struct render_state {
+ const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
- unsigned long ggtt_offset;
- u32 *batch;
- u32 size;
- u32 len;
+ u64 ggtt_offset;
+ int gen;
};
-static struct i915_render_state *render_state_alloc(struct drm_device *dev)
-{
- struct i915_render_state *so;
- struct page *page;
- int ret;
-
- so = kzalloc(sizeof(*so), GFP_KERNEL);
- if (!so)
- return ERR_PTR(-ENOMEM);
-
- so->obj = i915_gem_alloc_object(dev, 4096);
- if (so->obj == NULL) {
- ret = -ENOMEM;
- goto free;
- }
- so->size = 4096;
-
- ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
- if (ret)
- goto free_gem;
-
- BUG_ON(so->obj->pages->nents != 1);
- page = sg_page(so->obj->pages->sgl);
-
- so->batch = kmap(page);
- if (!so->batch) {
- ret = -ENOMEM;
- goto unpin;
- }
-
- so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-
- return so;
-unpin:
- i915_gem_object_ggtt_unpin(so->obj);
-free_gem:
- drm_gem_object_unreference(&so->obj->base);
-free:
- kfree(so);
- return ERR_PTR(ret);
-}
-
-static void render_state_free(struct i915_render_state *so)
-{
- kunmap(kmap_to_page(so->batch));
- i915_gem_object_ggtt_unpin(so->obj);
- drm_gem_object_unreference(&so->obj->base);
- kfree(so);
-}
-
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
@@ -101,98 +50,120 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
return NULL;
}
-static int render_state_setup(const int gen,
- const struct intel_renderstate_rodata *rodata,
- struct i915_render_state *so)
+static int render_state_init(struct render_state *so, struct drm_device *dev)
{
- const u64 goffset = i915_gem_obj_ggtt_offset(so->obj);
- u32 reloc_index = 0;
- u32 * const d = so->batch;
- unsigned int i = 0;
int ret;
- if (!rodata || rodata->batch_items * 4 > so->size)
+ so->gen = INTEL_INFO(dev)->gen;
+ so->rodata = render_state_get_rodata(dev, so->gen);
+ if (so->rodata == NULL)
+ return 0;
+
+ if (so->rodata->batch_items * 4 > 4096)
return -EINVAL;
+ so->obj = i915_gem_alloc_object(dev, 4096);
+ if (so->obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
+ if (ret)
+ goto free_gem;
+
+ so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
+ return 0;
+
+free_gem:
+ drm_gem_object_unreference(&so->obj->base);
+ return ret;
+}
+
+static int render_state_setup(struct render_state *so)
+{
+ const struct intel_renderstate_rodata *rodata = so->rodata;
+ unsigned int i = 0, reloc_index = 0;
+ struct page *page;
+ u32 *d;
+ int ret;
+
ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
if (ret)
return ret;
+ page = sg_page(so->obj->pages->sgl);
+ d = kmap(page);
+
while (i < rodata->batch_items) {
u32 s = rodata->batch[i];
- if (reloc_index < rodata->reloc_items &&
- i * 4 == rodata->reloc[reloc_index]) {
-
- s += goffset & 0xffffffff;
-
- /* We keep batch offsets max 32bit */
- if (gen >= 8) {
+ if (i * 4 == rodata->reloc[reloc_index]) {
+ u64 r = s + so->ggtt_offset;
+ s = lower_32_bits(r);
+ if (so->gen >= 8) {
+ if (i + 1 >= rodata->batch_items ||
+     rodata->batch[i + 1] != 0) {
+ kunmap(page);
+ return -EINVAL;
+ }
- d[i] = s;
- i++;
- s = (goffset & 0xffffffff00000000ull) >> 32;
+ d[i++] = s;
+ s = upper_32_bits(r);
}
reloc_index++;
}
- d[i] = s;
- i++;
+ d[i++] = s;
}
+ kunmap(page);
ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
if (ret)
return ret;
- if (rodata->reloc_items != reloc_index) {
- DRM_ERROR("not all relocs resolved, %d out of %d\n",
- reloc_index, rodata->reloc_items);
+ if (rodata->reloc[reloc_index] != -1) {
+ DRM_ERROR("only %d relocs resolved\n", reloc_index);
return -EINVAL;
}
- so->len = rodata->batch_items * 4;
-
return 0;
}
+static void render_state_fini(struct render_state *so)
+{
+ i915_gem_object_ggtt_unpin(so->obj);
+ drm_gem_object_unreference(&so->obj->base);
+}
+
int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
- const int gen = INTEL_INFO(ring->dev)->gen;
- struct i915_render_state *so;
- const struct intel_renderstate_rodata *rodata;
+ struct render_state so;
int ret;
if (WARN_ON(ring->id != RCS))
return -ENOENT;
- rodata = render_state_get_rodata(ring->dev, gen);
- if (rodata == NULL)
- return 0;
+ ret = render_state_init(&so, ring->dev);
+ if (ret)
+ return ret;
- so = render_state_alloc(ring->dev);
- if (IS_ERR(so))
- return PTR_ERR(so);
+ if (so.rodata == NULL)
+ return 0;
- ret = render_state_setup(gen, rodata, so);
+ ret = render_state_setup(&so);
if (ret)
goto out;
ret = ring->dispatch_execbuffer(ring,
- i915_gem_obj_ggtt_offset(so->obj),
- so->len,
+ so.ggtt_offset,
+ so.rodata->batch_items * 4,
I915_DISPATCH_SECURE);
if (ret)
goto out;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so->obj), ring);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
- ret = __i915_add_request(ring, NULL, so->obj, NULL);
+ ret = __i915_add_request(ring, NULL, so.obj, NULL);
/* __i915_add_request moves object to inactive if it fails */
out:
- render_state_free(so);
+ render_state_fini(&so);
return ret;
}
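[Aside, not part of the patch: the relocation loop above is the subtle part of the rewrite. On gen8+ a relocated address is 64 bits wide and occupies two consecutive batch dwords, so the low half overwrites the current slot and the high half the next one, which must have been zero in the rodata. A standalone sketch of that split, with illustrative values only.]

#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	/* One reloc slot followed by its mandatory zero placeholder. */
	uint32_t d[2] = { 0x00000100, 0 };
	uint64_t ggtt_offset = 0x1ffff0000ull;
	uint64_t r = d[0] + ggtt_offset;
	unsigned int i = 0;

	d[i++] = lower_32_bits(r);	/* pre-gen8 stops here */
	d[i++] = upper_32_bits(r);	/* gen8+ writes the high dword too */
	printf("%08x %08x\n", d[0], d[1]);	/* ffff0100 00000001 */
	return 0;
}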
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 7465ab0fd396..21c025a209c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -147,30 +147,68 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
return base;
}
-static int i915_setup_compression(struct drm_device *dev, int size)
+static int find_compression_threshold(struct drm_device *dev,
+ struct drm_mm_node *node,
+ int size,
+ int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int compression_threshold = 1;
int ret;
- compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
- if (!compressed_fb)
- goto err_llb;
+ /* HACK: This code depends on what we will do in *_enable_fbc. If that
+ * code changes, this code needs to change as well.
+ *
+ * The enable_fbc code will attempt to use one of our two compression
+ * thresholds; in that case we have only one fallback.
+ */
- /* Try to over-allocate to reduce reallocations and fragmentation */
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ /* Try to over-allocate to reduce reallocations and fragmentation. */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
- if (ret)
- ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
- size >>= 1, 4096,
- DRM_MM_SEARCH_DEFAULT);
- if (ret)
+ if (ret == 0)
+ return compression_threshold;
+
+again:
+ /* HW's ability to limit the CFB is 1:4 */
+ if (compression_threshold > 4 ||
+ (fb_cpp == 2 && compression_threshold == 2))
+ return 0;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret && INTEL_INFO(dev)->gen <= 4) {
+ return 0;
+ } else if (ret) {
+ compression_threshold <<= 1;
+ goto again;
+ } else {
+ return compression_threshold;
+ }
+}
+
+static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *uninitialized_var(compressed_llb);
+ int ret;
+
+ ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
+ size, fb_cpp);
+ if (!ret)
goto err_llb;
+ else if (ret > 1) {
+ DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power saving than a full-size buffer. Try to increase stolen memory size if available in the BIOS.\n");
+ }
+
+ dev_priv->fbc.threshold = ret;
if (HAS_PCH_SPLIT(dev))
- I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
else if (IS_GM45(dev)) {
- I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
+ I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -184,13 +222,12 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
- dev_priv->mm.stolen_base + compressed_fb->start);
+ dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->fbc.compressed_fb = compressed_fb;
- dev_priv->fbc.size = size;
+ dev_priv->fbc.size = size / dev_priv->fbc.threshold;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -199,14 +236,13 @@ static int i915_setup_compression(struct drm_device *dev, int size)
err_fb:
kfree(compressed_llb);
- drm_mm_remove_node(compressed_fb);
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
- kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
+int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -219,7 +255,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
- return i915_setup_compression(dev, size);
+ return i915_setup_compression(dev, size, fb_cpp);
}
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
@@ -229,10 +265,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
if (dev_priv->fbc.size == 0)
return;
- if (dev_priv->fbc.compressed_fb) {
- drm_mm_remove_node(dev_priv->fbc.compressed_fb);
- kfree(dev_priv->fbc.compressed_fb);
- }
+ drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
if (dev_priv->fbc.compressed_llb) {
drm_mm_remove_node(dev_priv->fbc.compressed_llb);
@@ -336,9 +369,20 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
+
+static void
+i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
+{
+ if (obj->stolen) {
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
+ obj->stolen = NULL;
+ }
+}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
+ .release = i915_gem_object_release_stolen,
};
static struct drm_i915_gem_object *
@@ -496,13 +540,3 @@ err_out:
drm_gem_object_unreference(&obj->base);
return NULL;
}
-
-void
-i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
-{
- if (obj->stolen) {
- drm_mm_remove_node(obj->stolen);
- kfree(obj->stolen);
- obj->stolen = NULL;
- }
-}
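[Aside, not part of the patch: the shape of find_compression_threshold() above is worth spelling out. It first over-allocates the CFB, then repeatedly halves the size while doubling the compression threshold, giving up past the hardware's 1:4 limit (1:2 for 16bpp framebuffers). A self-contained model of the search; try_alloc() stands in for drm_mm_insert_node(), and the gen <= 4 early-out is omitted.]

#include <stdio.h>

/* Stand-in for drm_mm_insert_node(): pretend only sizes <= avail fit. */
static int try_alloc(int size, int avail)
{
	return size <= avail ? 0 : -1;
}

/* Returns 0 on failure, else the 1:N compression threshold that fit. */
static int find_threshold(int size, int avail, int fb_cpp)
{
	int threshold = 1;

	if (try_alloc(size <<= 1, avail) == 0)	/* over-allocate first */
		return threshold;
again:
	if (threshold > 4 || (fb_cpp == 2 && threshold == 2))
		return 0;			/* HW limit is 1:4 */
	if (try_alloc(size >>= 1, avail)) {
		threshold <<= 1;		/* halve size, double ratio */
		goto again;
	}
	return threshold;
}

int main(void)
{
	/* 8 MiB request against 3 MiB of stolen space -> 1:4 threshold */
	printf("threshold = %d\n", find_threshold(8 << 20, 3 << 20, 4));
	return 0;
}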
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 21ea92886a56..fe69fc837d9e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -40,19 +40,87 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
+ struct list_head linear;
struct drm_device *dev;
struct mm_struct *mm;
struct work_struct work;
unsigned long count;
unsigned long serial;
+ bool has_linear;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mmu;
struct interval_tree_node it;
+ struct list_head link;
struct drm_i915_gem_object *obj;
+ bool is_linear;
};
+static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ unsigned long end;
+
+ mutex_lock(&dev->struct_mutex);
+ /* Cancel any active worker and force us to re-evaluate gup */
+ obj->userptr.work = NULL;
+
+ if (obj->pages != NULL) {
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_vma *vma, *tmp;
+ bool was_interruptible;
+
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ WARN_ON(ret && ret != -EIO);
+ }
+ WARN_ON(i915_gem_object_put_pages(obj));
+
+ dev_priv->mm.interruptible = was_interruptible;
+ }
+
+ end = obj->userptr.ptr + obj->base.size;
+
+ drm_gem_object_unreference(&obj->base);
+ mutex_unlock(&dev->struct_mutex);
+
+ return end;
+}
+
+static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_object *mmu;
+ unsigned long serial;
+
+restart:
+ serial = mn->serial;
+ list_for_each_entry(mmu, &mn->linear, link) {
+ struct drm_i915_gem_object *obj;
+
+ if (mmu->it.last < start || mmu->it.start > end)
+ continue;
+
+ obj = mmu->obj;
+ drm_gem_object_reference(&obj->base);
+ spin_unlock(&mn->lock);
+
+ cancel_userptr(obj);
+
+ spin_lock(&mn->lock);
+ if (serial != mn->serial)
+ goto restart;
+ }
+
+ return NULL;
+}
+
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
@@ -60,16 +128,18 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
{
struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
struct interval_tree_node *it = NULL;
+ unsigned long next = start;
unsigned long serial = 0;
end--; /* interval ranges are inclusive, but invalidate range is exclusive */
- while (start < end) {
- struct drm_i915_gem_object *obj;
+ while (next < end) {
+ struct drm_i915_gem_object *obj = NULL;
- obj = NULL;
spin_lock(&mn->lock);
- if (serial == mn->serial)
- it = interval_tree_iter_next(it, start, end);
+ if (mn->has_linear)
+ it = invalidate_range__linear(mn, mm, start, end);
+ else if (serial == mn->serial)
+ it = interval_tree_iter_next(it, next, end);
else
it = interval_tree_iter_first(&mn->objects, start, end);
if (it != NULL) {
@@ -81,31 +151,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
if (obj == NULL)
return;
- mutex_lock(&mn->dev->struct_mutex);
- /* Cancel any active worker and force us to re-evaluate gup */
- obj->userptr.work = NULL;
-
- if (obj->pages != NULL) {
- struct drm_i915_private *dev_priv = to_i915(mn->dev);
- struct i915_vma *vma, *tmp;
- bool was_interruptible;
-
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
-
- list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
- int ret = i915_vma_unbind(vma);
- WARN_ON(ret && ret != -EIO);
- }
- WARN_ON(i915_gem_object_put_pages(obj));
-
- dev_priv->mm.interruptible = was_interruptible;
- }
-
- start = obj->userptr.ptr + obj->base.size;
-
- drm_gem_object_unreference(&obj->base);
- mutex_unlock(&mn->dev->struct_mutex);
+ next = cancel_userptr(obj);
}
}
@@ -150,7 +196,9 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
mmu->mm = mm;
mmu->objects = RB_ROOT;
mmu->count = 0;
- mmu->serial = 0;
+ mmu->serial = 1;
+ INIT_LIST_HEAD(&mmu->linear);
+ mmu->has_linear = false;
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mmu->mn, mm);
@@ -197,6 +245,17 @@ static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
mmu->serial = 1;
}
+static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu)
+{
+ struct i915_mmu_object *mn;
+
+ list_for_each_entry(mn, &mmu->linear, link)
+ if (mn->is_linear)
+ return true;
+
+ return false;
+}
+
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
struct i915_mmu_object *mn)
@@ -204,7 +263,11 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
lockdep_assert_held(&mmu->dev->struct_mutex);
spin_lock(&mmu->lock);
- interval_tree_remove(&mn->it, &mmu->objects);
+ list_del(&mn->link);
+ if (mn->is_linear)
+ mmu->has_linear = i915_mmu_notifier_has_linear(mmu);
+ else
+ interval_tree_remove(&mn->it, &mmu->objects);
__i915_mmu_notifier_update_serial(mmu);
spin_unlock(&mmu->lock);
@@ -230,7 +293,6 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
*/
i915_gem_retire_requests(mmu->dev);
- /* Disallow overlapping userptr objects */
spin_lock(&mmu->lock);
it = interval_tree_iter_first(&mmu->objects,
mn->it.start, mn->it.last);
@@ -243,14 +305,22 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
* to flush their object references upon which the object will
* be removed from the interval-tree, or the range is
* still in use by another client and the overlap is invalid.
+ *
+ * If we do have an overlap, we cannot use the interval tree
+ * for fast range invalidation.
*/
obj = container_of(it, struct i915_mmu_object, it)->obj;
- ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
- } else {
+ if (!obj->userptr.workers)
+ mmu->has_linear = mn->is_linear = true;
+ else
+ ret = -EAGAIN;
+ } else
interval_tree_insert(&mn->it, &mmu->objects);
+
+ if (ret == 0) {
+ list_add(&mn->link, &mmu->linear);
__i915_mmu_notifier_update_serial(mmu);
- ret = 0;
}
spin_unlock(&mmu->lock);
mutex_unlock(&mmu->dev->struct_mutex);
@@ -611,12 +681,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
* We impose several restrictions upon the memory being mapped
* into the GPU.
* 1. It must be page aligned (both start/end addresses, i.e ptr and size).
- * 2. It cannot overlap any other userptr object in the same address space.
- * 3. It must be normal system memory, not a pointer into another map of IO
+ * 2. It must be normal system memory, not a pointer into another map of IO
* space (e.g. it must not be a GTT mmapping of another object).
- * 4. We only allow a bo as large as we could in theory map into the GTT,
+ * 3. We only allow a bo as large as we could in theory map into the GTT,
* that is we limit the size to the total size of the GTT.
- * 5. The bo is marked as being snoopable. The backing pages are left
+ * 4. The bo is marked as being snoopable. The backing pages are left
* accessible directly by the CPU, but reads and writes by the GPU may
* incur the cost of a snoop (unless you have an LLC architecture).
*
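[Aside, not part of the patch: the locking dance in invalidate_range__linear() above deserves a note. cancel_userptr() takes struct_mutex, so the walker must drop the notifier spinlock around it and then use the serial counter to detect whether the list changed while unlocked, restarting the walk if so. A toy pthread model of the pattern; all names and values here are illustrative, not driver code.]

#include <pthread.h>
#include <stdio.h>

struct notifier {
	pthread_mutex_t lock;
	unsigned long serial;	/* bumped whenever the object list mutates */
	int nobjs;
	int objs[4];
};

/* Walk the list, dropping the lock around the expensive cancel step and
 * restarting from scratch if the serial shows the list changed meanwhile. */
static void invalidate_all(struct notifier *mn)
{
	unsigned long serial;
	int i;

restart:
	serial = mn->serial;
	for (i = 0; i < mn->nobjs; i++) {
		int obj = mn->objs[i];

		pthread_mutex_unlock(&mn->lock);
		printf("cancel obj %d\n", obj);	/* cancel_userptr() here */
		pthread_mutex_lock(&mn->lock);

		if (serial != mn->serial)
			goto restart;	/* list changed underneath us */
	}
}

int main(void)
{
	struct notifier mn = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.serial = 1, .nobjs = 2, .objs = { 10, 11 },
	};

	pthread_mutex_lock(&mn.lock);
	invalidate_all(&mn);
	pthread_mutex_unlock(&mn.lock);
	return 0;
}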
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 66cf41765bf9..0b3f69439451 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -327,6 +327,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
struct drm_device *dev = error_priv->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error = error_priv->error;
+ struct drm_i915_error_object *obj;
int i, j, offset, elt;
int max_hangcheck_score;
@@ -395,8 +396,6 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
-
obj = error->ring[i].batchbuffer;
if (obj) {
err_puts(m, dev_priv->ring[i].name);
@@ -459,6 +458,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
+ if ((obj = error->semaphore_obj)) {
+ err_printf(m, "Semaphore page = 0x%08x\n", obj->gtt_offset);
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ elt * 4,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ }
+ }
+
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
@@ -529,6 +540,7 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->ring[i].requests);
}
+ i915_error_object_free(error->semaphore_obj);
kfree(error->active_bo);
kfree(error->overlay);
kfree(error->display);
@@ -746,7 +758,59 @@ static void i915_gem_record_fences(struct drm_device *dev,
}
}
+
+static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ struct intel_engine_cs *to;
+ int i;
+
+ if (!i915_semaphore_is_enabled(dev_priv->dev))
+ return;
+
+ if (!error->semaphore_obj)
+ error->semaphore_obj =
+ i915_error_object_create(dev_priv,
+ dev_priv->semaphore_obj,
+ &dev_priv->gtt.base);
+
+ for_each_ring(to, dev_priv, i) {
+ int idx;
+ u16 signal_offset;
+ u32 *tmp;
+
+ if (ring == to)
+ continue;
+
+ signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & PAGE_MASK) / 4;
+ tmp = error->semaphore_obj->pages[0];
+ idx = intel_ring_sync_index(ring, to);
+
+ ering->semaphore_mboxes[idx] = tmp[signal_offset];
+ ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+ }
+}
+
+static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *ring,
+ struct drm_i915_error_ring *ering)
+{
+ ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
+ ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
+ ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+ ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+
+ if (HAS_VEBOX(dev_priv->dev)) {
+ ering->semaphore_mboxes[2] =
+ I915_READ(RING_SYNC_2(ring->mmio_base));
+ ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ }
+}
+
static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
struct intel_engine_cs *ring,
struct drm_i915_error_ring *ering)
{
@@ -755,18 +819,10 @@ static void i915_record_ring_state(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 6) {
ering->rc_psmi = I915_READ(ring->mmio_base + 0x50);
ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
- ering->semaphore_mboxes[0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- ering->semaphore_mboxes[1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
- ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
- }
-
- if (HAS_VEBOX(dev)) {
- ering->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(ring->mmio_base));
- ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+ if (INTEL_INFO(dev)->gen >= 8)
+ gen8_record_semaphore_state(dev_priv, error, ring, ering);
+ else
+ gen6_record_semaphore_state(dev_priv, ring, ering);
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -871,6 +927,9 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring,
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (!i915_gem_obj_ggtt_bound(obj))
+ continue;
+
if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
break;
@@ -895,7 +954,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
error->ring[i].valid = true;
- i915_record_ring_state(dev, ring, &error->ring[i]);
+ i915_record_ring_state(dev, error, ring, &error->ring[i]);
request = i915_gem_find_active_request(ring);
if (request) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c05c84f3f091..6ef9d6fabf80 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -136,7 +136,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
if ((dev_priv->irq_mask & mask) != 0) {
@@ -151,7 +151,7 @@ ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (!intel_irqs_enabled(dev_priv))
return;
if ((dev_priv->irq_mask & mask) != mask) {
@@ -173,7 +173,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
{
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
dev_priv->gt_irq_mask &= ~interrupt_mask;
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
POSTING_READ(GTIMR);
}
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
}
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
@@ -206,7 +206,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, mask);
}
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
snb_update_pm_irq(dev_priv, mask, 0);
}
@@ -264,7 +264,7 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->pm_irq_mask;
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
}
}
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, mask);
}
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
bdw_update_pm_irq(dev_priv, mask, 0);
}
@@ -420,7 +420,7 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
assert_spin_locked(&dev_priv->irq_lock);
- if (WARN_ON(dev_priv->pm.irqs_disabled))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
I915_WRITE(SDEIMR, sdeimr);
@@ -1090,6 +1090,53 @@ static bool intel_hpd_irq_event(struct drm_device *dev,
return true;
}
+static void i915_digport_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, dig_port_work);
+ unsigned long irqflags;
+ u32 long_port_mask, short_port_mask;
+ struct intel_digital_port *intel_dig_port;
+ int i, ret;
+ u32 old_bits = 0;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ long_port_mask = dev_priv->long_hpd_port_mask;
+ dev_priv->long_hpd_port_mask = 0;
+ short_port_mask = dev_priv->short_hpd_port_mask;
+ dev_priv->short_hpd_port_mask = 0;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ bool valid = false;
+ bool long_hpd = false;
+ intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+ continue;
+
+ if (long_port_mask & (1 << i)) {
+ valid = true;
+ long_hpd = true;
+ } else if (short_port_mask & (1 << i))
+ valid = true;
+
+ if (valid) {
+ ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
+ if (ret == true) {
+ /* if hpd_pulse returned true, fall back to old-school hpd */
+ old_bits |= (1 << intel_dig_port->base.hpd_pin);
+ }
+ }
+ }
+
+ if (old_bits) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ dev_priv->hpd_event_bits |= old_bits;
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ schedule_work(&dev_priv->hotplug_work);
+ }
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@@ -1109,10 +1156,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
bool changed = false;
u32 hpd_event_bits;
- /* HPD irq before everything is fully set up. */
- if (!dev_priv->enable_hotplug_processing)
- return;
-
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -1122,6 +1165,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
dev_priv->hpd_event_bits = 0;
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (intel_encoder->hpd_pin > HPD_NONE &&
dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
@@ -1152,6 +1197,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
list_for_each_entry(connector, &mode_config->connector_list, head) {
intel_connector = to_intel_connector(connector);
+ if (!intel_connector->encoder)
+ continue;
intel_encoder = intel_connector->encoder;
if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
if (intel_encoder->hot_plug)
@@ -1218,10 +1265,138 @@ static void notify_ring(struct drm_device *dev,
trace_i915_gem_request_complete(ring);
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ intel_notify_mmio_flip(ring);
+
wake_up_all(&ring->irq_queue);
i915_queue_hangcheck(dev);
}
+static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
+ struct intel_rps_ei *rps_ei)
+{
+ u32 cz_ts, cz_freq_khz;
+ u32 render_count, media_count;
+ u32 elapsed_render, elapsed_media, elapsed_time;
+ u32 residency = 0;
+
+ cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
+
+ render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
+ media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
+
+ if (rps_ei->cz_clock == 0) {
+ rps_ei->cz_clock = cz_ts;
+ rps_ei->render_c0 = render_count;
+ rps_ei->media_c0 = media_count;
+
+ return dev_priv->rps.cur_freq;
+ }
+
+ elapsed_time = cz_ts - rps_ei->cz_clock;
+ rps_ei->cz_clock = cz_ts;
+
+ elapsed_render = render_count - rps_ei->render_c0;
+ rps_ei->render_c0 = render_count;
+
+ elapsed_media = media_count - rps_ei->media_c0;
+ rps_ei->media_c0 = media_count;
+
+ /* Convert all the counters into a common unit of milliseconds */
+ elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
+ elapsed_render /= cz_freq_khz;
+ elapsed_media /= cz_freq_khz;
+
+ /*
+ * Calculate the overall C0 residency percentage
+ * only if the elapsed time is non-zero
+ */
+ if (elapsed_time) {
+ residency =
+ ((max(elapsed_render, elapsed_media) * 100)
+ / elapsed_time);
+ }
+
+ return residency;
+}
+
+/**
+ * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
+ * busy-ness calculated from C0 counters of render & media power wells
+ * @dev_priv: DRM device private
+ */
+static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+{
+ u32 residency_C0_up = 0, residency_C0_down = 0;
+ u8 new_delay, adj;
+
+ dev_priv->rps.ei_interrupt_count++;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ if (dev_priv->rps.up_ei.cz_clock == 0) {
+ vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
+ vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
+ return dev_priv->rps.cur_freq;
+ }
+
+ /*
+ * To down throttle, C0 residency should be less than down threshold
+ * for continuous EI intervals. So calculate the down EI counters
+ * once in VLV_INT_COUNT_FOR_DOWN_EI
+ */
+ if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
+ dev_priv->rps.ei_interrupt_count = 0;
+
+ residency_C0_down = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.down_ei);
+ } else {
+ residency_C0_up = vlv_c0_residency(dev_priv,
+ &dev_priv->rps.up_ei);
+ }
+
+ new_delay = dev_priv->rps.cur_freq;
+
+ adj = dev_priv->rps.last_adj;
+ /* C0 residency is greater than the UP threshold; increase frequency */
+ if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
+ if (adj > 0)
+ adj *= 2;
+ else
+ adj = 1;
+
+ if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+
+ /*
+ * For better performance, jump directly
+ * to RPe if we're below it.
+ */
+ if (new_delay < dev_priv->rps.efficient_freq)
+ new_delay = dev_priv->rps.efficient_freq;
+
+ } else if (!dev_priv->rps.ei_interrupt_count &&
+ (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
+ if (adj < 0)
+ adj *= 2;
+ else
+ adj = -1;
+ /*
+ * C0 residency was below the down threshold for a full
+ * VLV_INT_COUNT_FOR_DOWN_EI period, so reduce the frequency.
+ */
+ if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
+ new_delay = dev_priv->rps.cur_freq + adj;
+ }
+
+ return new_delay;
+}
+
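[Aside, not part of the patch: the arithmetic in vlv_c0_residency() reduces to normalizing the timestamp delta and the per-well busy counters to milliseconds, then taking residency = max(render, media) * 100 / elapsed_time. A worked numeric example; CZ_CLOCK_TO_MS and the 400 MHz frequency are assumed stand-ins for the real VLV constants.]

#include <stdio.h>
#include <stdint.h>

#define CZ_CLOCK_TO_MS	100000	/* assumed: CZ timestamp ticks per ms */

/* Percentage of the EI window the GPU spent in C0, taking the busier
 * of the render and media power wells. */
static uint32_t c0_residency(uint32_t d_time, uint32_t d_render,
			     uint32_t d_media, uint32_t cz_freq_khz)
{
	uint32_t ms = d_time / CZ_CLOCK_TO_MS;
	uint32_t render_ms = d_render / cz_freq_khz;
	uint32_t media_ms = d_media / cz_freq_khz;

	if (!ms)
		return 0;
	return (render_ms > media_ms ? render_ms : media_ms) * 100 / ms;
}

int main(void)
{
	/* 10 ms window, render busy ~7 ms, media ~3 ms -> 70% */
	printf("%u%%\n", c0_residency(10 * CZ_CLOCK_TO_MS,
				      7 * 400000, 3 * 400000, 400000));
	return 0;
}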
static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -1232,11 +1407,11 @@ static void gen6_pm_rps_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- if (IS_BROADWELL(dev_priv->dev))
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ if (INTEL_INFO(dev_priv->dev)->gen >= 8)
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
else {
/* Make sure not to corrupt PMIMR state used by ringbuffer */
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
}
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1252,8 +1427,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
if (adj > 0)
adj *= 2;
- else
- adj = 1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
/*
@@ -1268,11 +1445,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
else
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
+ } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
+ new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
if (adj < 0)
adj *= 2;
- else
- adj = -1;
+ else {
+ /* CHV needs even encode values */
+ adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
+ }
new_delay = dev_priv->rps.cur_freq + adj;
} else { /* unknown event */
new_delay = dev_priv->rps.cur_freq;
@@ -1372,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+ gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1386,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
return;
spin_lock(&dev_priv->irq_lock);
- ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+ gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
spin_unlock(&dev_priv->irq_lock);
iir &= GT_PARITY_ERROR(dev);
@@ -1441,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1458,6 +1639,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
@@ -1465,7 +1647,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[RCS]);
if (bcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
- I915_WRITE(GEN8_GT_IIR(0), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1473,6 +1654,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
@@ -1480,7 +1662,6 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS2]);
- I915_WRITE(GEN8_GT_IIR(1), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1488,10 +1669,10 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_PM_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(2));
if (tmp & dev_priv->pm_rps_events) {
- ret = IRQ_HANDLED;
- gen8_rps_irq_handler(dev_priv, tmp);
I915_WRITE(GEN8_GT_IIR(2),
tmp & dev_priv->pm_rps_events);
+ ret = IRQ_HANDLED;
+ gen8_rps_irq_handler(dev_priv, tmp);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@@ -1499,11 +1680,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
if (vcs & GT_RENDER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VECS]);
- I915_WRITE(GEN8_GT_IIR(3), tmp);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@@ -1514,23 +1695,104 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
+static int ilk_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 0;
+ case PORT_C:
+ return 8;
+ case PORT_D:
+ return 16;
+ }
+}
+
+static int g4x_port_to_hotplug_shift(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ case PORT_E:
+ default:
+ return -1;
+ case PORT_B:
+ return 17;
+ case PORT_C:
+ return 19;
+ case PORT_D:
+ return 21;
+ }
+}
+
+static inline enum port get_port_from_pin(enum hpd_pin pin)
+{
+ switch (pin) {
+ case HPD_PORT_B:
+ return PORT_B;
+ case HPD_PORT_C:
+ return PORT_C;
+ case HPD_PORT_D:
+ return PORT_D;
+ default:
+ return PORT_A; /* no hpd */
+ }
+}
+
static inline void intel_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_trigger,
+ u32 dig_hotplug_reg,
const u32 *hpd)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
+ enum port port;
bool storm_detected = false;
+ bool queue_dig = false, queue_hp = false;
+ u32 dig_shift;
+ u32 dig_port_mask = 0;
if (!hotplug_trigger)
return;
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
- hotplug_trigger);
+ DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg);
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
+ if (!(hpd[i] & hotplug_trigger))
+ continue;
+
+ port = get_port_from_pin(i);
+ if (port && dev_priv->hpd_irq_port[port]) {
+ bool long_hpd;
+ if (IS_G4X(dev)) {
+ dig_shift = g4x_port_to_hotplug_shift(port);
+ long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ } else {
+ dig_shift = ilk_port_to_hotplug_shift(port);
+ long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
+ }
+
+ DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+ /* For long HPD pulses we want the digital queue to happen,
+ * but we still want HPD storm detection to function. */
+ if (long_hpd) {
+ dev_priv->long_hpd_port_mask |= (1 << port);
+ dig_port_mask |= hpd[i];
+ } else {
+ /* for short HPD just trigger the digital queue */
+ dev_priv->short_hpd_port_mask |= (1 << port);
+ hotplug_trigger &= ~hpd[i];
+ }
+ queue_dig = true;
+ }
+ }
+
+ for (i = 1; i < HPD_NUM_PINS; i++) {
if (hpd[i] & hotplug_trigger &&
dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
/*
@@ -1550,7 +1812,11 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
- dev_priv->hpd_event_bits |= (1 << i);
+ if (!(dig_port_mask & hpd[i])) {
+ dev_priv->hpd_event_bits |= (1 << i);
+ queue_hp = true;
+ }
+
if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
dev_priv->hpd_stats[i].hpd_last_jiffies
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
@@ -1579,7 +1845,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
* queue for otherwise the flush_work in the pageflip code will
* deadlock.
*/
- schedule_work(&dev_priv->hotplug_work);
+ if (queue_dig)
+ queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
+ if (queue_hp)
+ schedule_work(&dev_priv->hotplug_work);
}
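[Aside, not part of the patch: the long-versus-short classification above boils down to looking up a per-port bit position in the latched hotplug register and testing the LONG_DETECT bit at that shift. A minimal model; the shift table mirrors g4x_port_to_hotplug_shift(), while the register layout and bit value are illustrative rather than the real i915_reg.h definitions.]

#include <stdio.h>
#include <stdint.h>

#define HOTPLUG_LONG_DETECT (1u << 1)	/* models PORTB_HOTPLUG_LONG_DETECT */

/* Per-port bit position in the hotplug status register; -1 = no hpd. */
static int port_shift(int port)
{
	switch (port) {
	case 1: return 17;	/* PORT_B */
	case 2: return 19;	/* PORT_C */
	case 3: return 21;	/* PORT_D */
	default: return -1;
	}
}

int main(void)
{
	uint32_t hotplug_reg = 1u << (19 + 1);	/* long pulse on PORT_C */
	int shift = port_shift(2);
	int long_hpd = shift >= 0 &&
		       ((hotplug_reg >> shift) & HOTPLUG_LONG_DETECT);

	printf("PORT_C %s pulse\n", long_hpd ? "long" : "short");
	return 0;
}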
static void gmbus_irq_handler(struct drm_device *dev)
@@ -1700,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
- snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1809,26 +2078,28 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
- if (IS_G4X(dev)) {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
+ if (hotplug_status) {
+ I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ /*
+ * Make sure hotplug status is cleared before we clear IIR, or else we
+ * may miss hotplug events.
+ */
+ POSTING_READ(PORT_HOTPLUG_STAT);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_g4x);
- } else {
- u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
+ if (IS_G4X(dev)) {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
- }
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
+ } else {
+ u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
- if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
- hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- dp_aux_irq_handler(dev);
+ intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
+ }
- I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
- /*
- * Make sure hotplug status is cleared before we clear IIR, or else we
- * may miss hotplug events.
- */
- POSTING_READ(PORT_HOTPLUG_STAT);
+ if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
+ hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
+ dp_aux_irq_handler(dev);
+ }
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -1839,29 +2110,36 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
irqreturn_t ret = IRQ_NONE;
while (true) {
- iir = I915_READ(VLV_IIR);
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
+ if (gt_iir)
+ I915_WRITE(GTIIR, gt_iir);
+
pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir)
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+
+ iir = I915_READ(VLV_IIR);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
ret = IRQ_HANDLED;
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
- valleyview_pipestat_irq_handler(dev, iir);
-
- /* Consume port. Then clear IIR or we'll miss events */
- if (iir & I915_DISPLAY_PORT_INTERRUPT)
- i9xx_hpd_irq_handler(dev);
-
+ if (gt_iir)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
-
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
}
out:
@@ -1882,21 +2160,27 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
if (master_ctl == 0 && iir == 0)
break;
+ ret = IRQ_HANDLED;
+
I915_WRITE(GEN8_MASTER_IRQ, 0);
- gen8_gt_irq_handler(dev, dev_priv, master_ctl);
+ /* Find, clear, then process each source of interrupt */
- valleyview_pipestat_irq_handler(dev, iir);
+ if (iir) {
+ /* Consume port before clearing IIR or we'll miss events */
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
+ i9xx_hpd_irq_handler(dev);
+ I915_WRITE(VLV_IIR, iir);
+ }
- /* Consume port. Then clear IIR or we'll miss events */
- i9xx_hpd_irq_handler(dev);
+ gen8_gt_irq_handler(dev, dev_priv, master_ctl);
- I915_WRITE(VLV_IIR, iir);
+ /* Call regardless, as some status bits might not be
+ * signalled in iir */
+ valleyview_pipestat_irq_handler(dev, iir);
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
-
- ret = IRQ_HANDLED;
}
return ret;
@@ -1907,8 +2191,12 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
+ u32 dig_hotplug_reg;
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
+
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -2014,8 +2302,12 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
+ u32 dig_hotplug_reg;
+
+ dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
+ I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
- intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
+ intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2132,6 +2424,14 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
}
}
+/*
+ * To handle irqs with the minimum potential races with fresh interrupts, we:
+ * 1 - Disable Master Interrupt Control.
+ * 2 - Find the source(s) of the interrupt.
+ * 3 - Clear the Interrupt Identity bits (IIR).
+ * 4 - Process the interrupt(s) that had bits set in the IIRs.
+ * 5 - Re-enable Master Interrupt Control.
+ */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
@@ -2159,32 +2459,34 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
POSTING_READ(SDEIER);
}
+ /* Find, clear, then process each source of interrupt */
+
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
+ I915_WRITE(GTIIR, gt_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
- I915_WRITE(GTIIR, gt_iir);
- ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
+ I915_WRITE(DEIIR, de_iir);
+ ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 7)
ivb_display_irq_handler(dev, de_iir);
else
ilk_display_irq_handler(dev, de_iir);
- I915_WRITE(DEIIR, de_iir);
- ret = IRQ_HANDLED;
}
if (INTEL_INFO(dev)->gen >= 6) {
u32 pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
- gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
+ gen6_rps_irq_handler(dev_priv, pm_iir);
}
}
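Both this handler and the gen8 one below implement the five-step procedure spelled out in the comment before ironlake_irq_handler(). A standalone sketch of the ordering, in plain C with stand-in register helpers (not driver code):

#include <assert.h>
#include <stdint.h>

static uint32_t fake_iir = 0x4;				/* one event pending */

static uint32_t read_iir(void) { return fake_iir; }
static void write_iir(uint32_t v) { fake_iir &= ~v; }	/* W1C semantics */

int main(void)
{
	int handled = 0;
	uint32_t iir;

	/* 1: disable master interrupt control (elided here) */
	iir = read_iir();		/* 2: find the source(s) */
	if (iir) {
		write_iir(iir);		/* 3: clear IIR before processing */
		handled = 1;		/* 4: process the bits we sampled */
	}
	/* 5: re-enable master interrupt control (elided here) */

	assert(handled && fake_iir == 0);
	return 0;
}

Clearing the IIR before processing means an event that fires while we process re-latches and re-raises the interrupt instead of being lost.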
@@ -2215,36 +2517,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
+ /* Find, clear, then process each source of interrupt */
+
ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
if (master_ctl & GEN8_DE_MISC_IRQ) {
tmp = I915_READ(GEN8_DE_MISC_IIR);
- if (tmp & GEN8_DE_MISC_GSE)
- intel_opregion_asle_intr(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_MISC_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_DE_MISC_GSE)
+ intel_opregion_asle_intr(dev);
+ else
+ DRM_ERROR("Unexpected DE Misc interrupt\n");
}
+ else
+ DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
}
if (master_ctl & GEN8_DE_PORT_IRQ) {
tmp = I915_READ(GEN8_DE_PORT_IIR);
- if (tmp & GEN8_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
- else if (tmp)
- DRM_ERROR("Unexpected DE Port interrupt\n");
- else
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
-
if (tmp) {
I915_WRITE(GEN8_DE_PORT_IIR, tmp);
ret = IRQ_HANDLED;
+ if (tmp & GEN8_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+ else
+ DRM_ERROR("Unexpected DE Port interrupt\n");
}
+ else
+ DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(pipe) {
@@ -2254,33 +2556,32 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
continue;
pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
- if (pipe_iir & GEN8_PIPE_VBLANK)
- intel_pipe_handle_vblank(dev, pipe);
-
- if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
- intel_prepare_page_flip(dev, pipe);
- intel_finish_page_flip_plane(dev, pipe);
- }
+ if (pipe_iir) {
+ ret = IRQ_HANDLED;
+ I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_PIPE_VBLANK)
+ intel_pipe_handle_vblank(dev, pipe);
- if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
- hsw_pipe_crc_irq_handler(dev, pipe);
+ if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
+ intel_prepare_page_flip(dev, pipe);
+ intel_finish_page_flip_plane(dev, pipe);
+ }
- if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
- if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
- false))
- DRM_ERROR("Pipe %c FIFO underrun\n",
- pipe_name(pipe));
- }
+ if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
+ hsw_pipe_crc_irq_handler(dev, pipe);
- if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
- DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
- pipe_name(pipe),
- pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
- }
+ if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
+ if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+ false))
+ DRM_ERROR("Pipe %c FIFO underrun\n",
+ pipe_name(pipe));
+ }
- if (pipe_iir) {
- ret = IRQ_HANDLED;
- I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+ if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
+ DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+ pipe_name(pipe),
+ pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
+ }
} else
DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
}
@@ -2292,13 +2593,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
* on older pch-split platforms. But this needs testing.
*/
u32 pch_iir = I915_READ(SDEIIR);
-
- cpt_irq_handler(dev, pch_iir);
-
if (pch_iir) {
I915_WRITE(SDEIIR, pch_iir);
ret = IRQ_HANDLED;
- }
+ cpt_irq_handler(dev, pch_iir);
+ } else
+ DRM_ERROR("The master control interrupt lied (SDE)!\n");
}
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@@ -2753,12 +3054,7 @@ static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
if (INTEL_INFO(dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return false;
+ return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
@@ -2767,19 +3063,20 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
}
static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
+semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
int i;
if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
- /*
- * FIXME: gen8 semaphore support - currently we don't emit
- * semaphores on bdw anyway, but this needs to be addressed when
- * we merge that code.
- */
- return NULL;
+ for_each_ring(signaller, dev_priv, i) {
+ if (ring == signaller)
+ continue;
+
+ if (offset == signaller->semaphore.signal_ggtt[ring->id])
+ return signaller;
+ }
} else {
u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
@@ -2792,8 +3089,8 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr)
}
}
- DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n",
- ring->id, ipehr);
+ DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
+ ring->id, ipehr, offset);
return NULL;
}
@@ -2803,7 +3100,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 cmd, ipehr, head;
- int i;
+ u64 offset = 0;
+ int i, backwards;
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
@@ -2812,13 +3110,15 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
/*
* HEAD is likely pointing to the dword after the actual command,
* so scan backwards until we find the MBOX. But limit it to just 3
- * dwords. Note that we don't care about ACTHD here since that might
+ * or 4 dwords depending on the semaphore wait command size.
+ * Note that we don't care about ACTHD here since that might
* point at a batch, and semaphores are always emitted into the
* ringbuffer itself.
*/
head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
- for (i = 4; i; --i) {
+ for (i = backwards; i; --i) {
/*
* Be paranoid and presume the hw has gone off into the wild -
* our ring is smaller than what the hardware (and hence
@@ -2838,7 +3138,12 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
return NULL;
*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
- return semaphore_wait_to_signaller_ring(ring, ipehr);
+ if (INTEL_INFO(ring->dev)->gen >= 8) {
+ offset = ioread32(ring->buffer->virtual_start + head + 12);
+ offset <<= 32;
+ offset |= ioread32(ring->buffer->virtual_start + head + 8);
+ }
+ return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
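On gen8 the MI_SEMAPHORE_WAIT packet parsed above is four dwords — header, semaphore data, address low, address high — and the code stitches the two address dwords into a 64-bit offset. A standalone sketch of that assembly (the dword layout is inferred from the reads at head + 4/8/12 above):

#include <assert.h>
#include <stdint.h>

/* dw[0] = MI_SEMAPHORE_WAIT header, dw[1] = semaphore data dword,
 * dw[2] = address low, dw[3] = address high */
static uint64_t semaphore_offset(const uint32_t dw[4])
{
	uint64_t offset = dw[3];	/* high dword (head + 12) */

	offset <<= 32;
	offset |= dw[2];		/* low dword (head + 8) */
	return offset;
}

int main(void)
{
	const uint32_t dw[4] = { 0, 0, 0xdeadbeef, 0x1 };

	assert(semaphore_offset(dw) == 0x1deadbeefULL);
	return 0;
}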
static int semaphore_passed(struct intel_engine_cs *ring)
@@ -3159,7 +3464,9 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
for_each_pipe(pipe)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
GEN5_IRQ_RESET(GEN8_DE_PORT_);
GEN5_IRQ_RESET(GEN8_DE_MISC_);
@@ -3168,6 +3475,18 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
+ ~dev_priv->de_irq_mask[PIPE_B]);
+ GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
+ ~dev_priv->de_irq_mask[PIPE_C]);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
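gen8_irq_power_well_post_enable() is meant to be called from the display power-well code once pipe B/C power is back, since those pipes' IRQ registers lose their contents while the well is off. A hypothetical call site — a sketch only, the real hook is outside this diff:

/* Hypothetical sketch; the actual power-well enable path is not part of
 * this patch. */
static void example_power_well_enable(struct drm_i915_private *dev_priv)
{
	/* ... switch the power well on and wait for it to settle ... */

	/* Reprogram the pipe B/C IRQ registers from the cached masks,
	 * as their hardware state was lost while powered down. */
	gen8_irq_power_well_post_enable(dev_priv);
}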
+
static void cherryview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3492,8 +3811,11 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
for_each_pipe(pipe)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe],
- de_pipe_enables);
+ if (intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(pipe)))
+ GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ dev_priv->de_irq_mask[pipe],
+ de_pipe_enables);
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
}
@@ -4324,12 +4646,17 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
+ INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
/* Let's track the enabled rps events */
- dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
+ if (IS_VALLEYVIEW(dev))
+ /* WaGsvRC0ResidencyMethod:VLV */
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ else
+ dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
@@ -4339,6 +4666,9 @@ void intel_irq_init(struct drm_device *dev)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ /* Haven't installed the IRQ handler yet */
+ dev_priv->pm._irqs_disabled = true;
+
if (IS_GEN2(dev)) {
dev->max_vblank_count = 0;
dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
@@ -4426,7 +4756,9 @@ void intel_hpd_init(struct drm_device *dev)
list_for_each_entry(connector, &mode_config->connector_list, head) {
struct intel_connector *intel_connector = to_intel_connector(connector);
connector->polled = intel_connector->polled;
- if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ if (intel_connector->mst_port)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
@@ -4444,7 +4776,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
dev->driver->irq_uninstall(dev);
- dev_priv->pm.irqs_disabled = true;
+ dev_priv->pm._irqs_disabled = true;
}
/* Restore interrupts so we can recover from runtime PM. */
@@ -4452,7 +4784,7 @@ void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
dev->driver->irq_preinstall(dev);
dev->driver->irq_postinstall(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d05a2afa17dc..62ee8308d682 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -37,7 +37,7 @@ struct i915_params i915 __read_mostly = {
.enable_fbc = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
- .enable_psr = 0,
+ .enable_psr = 1,
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1,
.enable_ips = 1,
@@ -48,6 +48,8 @@ struct i915_params i915 __read_mostly = {
.disable_display = 0,
.enable_cmd_parser = 1,
.disable_vtd_wa = 0,
+ .use_mmio_flip = 0,
+ .mmio_debug = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -117,7 +119,7 @@ MODULE_PARM_DESC(enable_ppgtt,
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
module_param_named(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: true)");
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
@@ -156,3 +158,12 @@ MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)");
+
+module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600);
+MODULE_PARM_DESC(use_mmio_flip,
+ "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
+
+module_param_named(mmio_debug, i915.mmio_debug, bool, 0600);
+MODULE_PARM_DESC(mmio_debug,
+ "Enable the MMIO debug code (default: false). This may negatively "
+ "affect performance.");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a5bab61bfc00..fe5c27630e95 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -29,8 +29,8 @@
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
-#define _PIPE3(pipe, a, b, c) (pipe < 2 ? _PIPE(pipe, a, b) : c)
-#define _PORT3(port, a, b, c) (port < 2 ? _PORT(port, a, b) : c)
+#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
+ (pipe) == PIPE_B ? (b) : (c))
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
@@ -240,7 +240,7 @@
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
-#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
#define MI_SEMAPHORE_COMPARE (1<<20)
@@ -266,6 +266,11 @@
#define MI_RESTORE_EXT_STATE_EN (1<<2)
#define MI_FORCE_RESTORE (1<<1)
#define MI_RESTORE_INHIBIT (1<<0)
+#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
+#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
+#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
+#define MI_SEMAPHORE_POLL (1<<15)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
@@ -360,6 +365,7 @@
#define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */
#define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9)
#define PIPE_CONTROL_NOTIFY (1<<8)
+#define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */
#define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4)
#define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3)
#define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2)
@@ -525,10 +531,21 @@ enum punit_power_well {
#define PUNIT_REG_GPU_FREQ_STS 0xd8
#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
+#define PUNIT_REG_CZ_TIMESTAMP 0xce
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */
+#define PUNIT_GPU_STATUS_REG 0xdb
+#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16
+#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_SHIFT 8
+#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff
+
+#define PUNIT_GPU_DUTYCYCLE_REG 0xdf
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8
+#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff
+
#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c
#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3
#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8
@@ -540,6 +557,11 @@ enum punit_power_well {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
+#define VLV_RP_UP_EI_THRESHOLD 90
+#define VLV_RP_DOWN_EI_THRESHOLD 70
+#define VLV_INT_COUNT_FOR_DOWN_EI 5
+
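Taken together, these constants suggest a residency-driven policy: raise the GPU frequency when C0 residency over an evaluation interval (EI) exceeds 90%, and lower it only after five consecutive intervals below 70%. A sketch of that comparison — the wiring is an assumption from the names; the actual algorithm lives in the RPS work handler, not in this hunk:

/* Returns +1 to raise the frequency, -1 to lower it, 0 to hold.
 * residency_pct is C0 residency over the elapsed EI, in percent. */
static int vlv_rc0_decision(unsigned int residency_pct,
			    unsigned int *down_ei_count)
{
	if (residency_pct > 90) {		/* VLV_RP_UP_EI_THRESHOLD */
		*down_ei_count = 0;
		return 1;
	}
	if (residency_pct < 70 &&		/* VLV_RP_DOWN_EI_THRESHOLD */
	    ++*down_ei_count >= 5) {		/* VLV_INT_COUNT_FOR_DOWN_EI */
		*down_ei_count = 0;
		return -1;
	}
	return 0;
}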
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
@@ -574,6 +596,11 @@ enum punit_power_well {
#define DSI_PLL_M1_DIV_SHIFT 0
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
+#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
+#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
+#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
+#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
+#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
/**
* DOC: DPIO
@@ -761,6 +788,8 @@ enum punit_power_well {
#define _VLV_PCS_DW8_CH0 0x8220
#define _VLV_PCS_DW8_CH1 0x8420
+#define CHV_PCS_USEDCLKCHANNEL_OVRRIDE (1 << 20)
+#define CHV_PCS_USEDCLKCHANNEL (1 << 21)
#define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1)
#define _VLV_PCS01_DW8_CH0 0x0220
@@ -869,6 +898,16 @@ enum punit_power_well {
#define DPIO_CHV_PROP_COEFF_SHIFT 0
#define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1)
+#define _CHV_CMN_DW5_CH0 0x8114
+#define CHV_BUFRIGHTENA1_DISABLE (0 << 20)
+#define CHV_BUFRIGHTENA1_NORMAL (1 << 20)
+#define CHV_BUFRIGHTENA1_FORCE (3 << 20)
+#define CHV_BUFRIGHTENA1_MASK (3 << 20)
+#define CHV_BUFLEFTENA1_DISABLE (0 << 22)
+#define CHV_BUFLEFTENA1_NORMAL (1 << 22)
+#define CHV_BUFLEFTENA1_FORCE (3 << 22)
+#define CHV_BUFLEFTENA1_MASK (3 << 22)
+
#define _CHV_CMN_DW13_CH0 0x8134
#define _CHV_CMN_DW0_CH1 0x8080
#define DPIO_CHV_S1_DIV_SHIFT 21
@@ -883,8 +922,21 @@ enum punit_power_well {
#define _CHV_CMN_DW1_CH1 0x8084
#define DPIO_AFC_RECAL (1 << 14)
#define DPIO_DCLKP_EN (1 << 13)
+#define CHV_BUFLEFTENA2_DISABLE (0 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_NORMAL (1 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_FORCE (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFLEFTENA2_MASK (3 << 17) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_DISABLE (0 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_NORMAL (1 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_FORCE (3 << 19) /* CL2 DW1 only */
+#define CHV_BUFRIGHTENA2_MASK (3 << 19) /* CL2 DW1 only */
#define CHV_CMN_DW14(ch) _PIPE(ch, _CHV_CMN_DW14_CH0, _CHV_CMN_DW1_CH1)
+#define _CHV_CMN_DW19_CH0 0x814c
+#define _CHV_CMN_DW6_CH1 0x8098
+#define CHV_CMN_USEDCLKCHANNEL (1 << 13)
+#define CHV_CMN_DW19(ch) _PIPE(ch, _CHV_CMN_DW19_CH0, _CHV_CMN_DW6_CH1)
+
#define CHV_CMN_DW30 0x8178
#define DPIO_LRC_BYPASS (1 << 3)
@@ -933,6 +985,7 @@ enum punit_power_well {
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
+
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
@@ -1170,6 +1223,8 @@ enum punit_power_well {
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define VLV_PCBR (VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
#define DISPLAY_PLANE_FLIP_PENDING(plane) (1<<(11-(plane))) /* A and B only */
#define EIR 0x020b0
#define EMR 0x020b4
@@ -1570,11 +1625,10 @@ enum punit_power_well {
/*
* Clock control & power management
*/
-#define DPLL_A_OFFSET 0x6014
-#define DPLL_B_OFFSET 0x6018
-#define CHV_DPLL_C_OFFSET 0x6030
-#define DPLL(pipe) (dev_priv->info.dpll_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
+#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
+#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define DPLL(pipe) _PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 0x6000
#define VGA1 0x6004
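The reworked _PIPE3() above turns DPLL()/DPLL_MD() into a pure three-way select (and, unlike the old pipe < 2 form, it parenthesizes its argument). A standalone check of the mapping, with display_mmio_offset taken as zero for the arithmetic:

#include <assert.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
			       (pipe) == PIPE_B ? (b) : (c))
#define DPLL(pipe) _PIPE3((pipe), 0x6014, 0x6018, 0x6030)

int main(void)
{
	assert(DPLL(PIPE_A) == 0x6014);
	assert(DPLL(PIPE_B) == 0x6018);
	assert(DPLL(PIPE_C) == 0x6030);	/* third pipe, CHV only */
	return 0;
}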
@@ -1662,11 +1716,10 @@ enum punit_power_well {
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD_OFFSET 0x601c /* 965+ only */
-#define DPLL_B_MD_OFFSET 0x6020 /* 965+ only */
-#define CHV_DPLL_C_MD_OFFSET 0x603c
-#define DPLL_MD(pipe) (dev_priv->info.dpll_md_offsets[pipe] + \
- dev_priv->info.display_mmio_offset)
+#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
+#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
+#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define DPLL_MD(pipe) _PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
@@ -2231,7 +2284,7 @@ enum punit_power_well {
/* Same as Haswell, but 72064 bytes now. */
#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
-
+#define CHV_CLK_CTL1 0x101100
#define VLV_CLK_CTL2 0x101104
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -2376,6 +2429,7 @@ enum punit_power_well {
#define EDP_PSR_BASE(dev) (IS_HASWELL(dev) ? 0x64800 : 0x6f800)
#define EDP_PSR_CTL(dev) (EDP_PSR_BASE(dev) + 0)
#define EDP_PSR_ENABLE (1<<31)
+#define BDW_PSR_SINGLE_FRAME (1<<30)
#define EDP_PSR_LINK_DISABLE (0<<27)
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
@@ -2533,8 +2587,14 @@ enum punit_power_well {
#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
+#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
+#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
+#define PORTC_HOTPLUG_INT_LONG_PULSE (2 << 19)
+#define PORTC_HOTPLUG_INT_SHORT_PULSE (1 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
+#define PORTB_HOTPLUG_INT_LONG_PULSE (2 << 17)
+#define PORTB_HOTPLUG_INT_SHORT_PULSE (1 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
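Each port's hotplug status is now a two-bit field: 2 encodes a long pulse (plug or unplug), 1 a short pulse (a sink-initiated IRQ such as DP MST) — the conventional DP HPD meaning; this hunk only defines the bits. A standalone decode of port B using the values above:

#include <stdint.h>
#include <stdio.h>

#define PORTB_HOTPLUG_INT_STATUS	(3u << 17)
#define PORTB_HOTPLUG_INT_LONG_PULSE	(2u << 17)
#define PORTB_HOTPLUG_INT_SHORT_PULSE	(1u << 17)

static void decode_portb(uint32_t hotplug_status)
{
	uint32_t bits = hotplug_status & PORTB_HOTPLUG_INT_STATUS;

	if (bits == PORTB_HOTPLUG_INT_LONG_PULSE)
		printf("port B: long pulse (connect/disconnect)\n");
	else if (bits == PORTB_HOTPLUG_INT_SHORT_PULSE)
		printf("port B: short pulse (sink IRQ, e.g. DP MST)\n");
}

int main(void)
{
	decode_portb(PORTB_HOTPLUG_INT_LONG_PULSE);
	return 0;
}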
@@ -2588,7 +2648,7 @@ enum punit_power_well {
#define PORT_DFT_I9XX 0x61150
#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X 0x61154
+#define PORT_DFT2_G4X (dev_priv->info.display_mmio_offset + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK (0x3 << 0)
#define PIPE_B_SCRAMBLE_RESET (1 << 1)
@@ -4630,6 +4690,8 @@ enum punit_power_well {
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1<<19)
+#define GEN7_L3CNTLREG2 0xB020
+#define GEN7_L3CNTLREG3 0xB024
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
@@ -4876,8 +4938,7 @@ enum punit_power_well {
#define _PCH_TRANSA_LINK_M2 0xe0048
#define _PCH_TRANSA_LINK_N2 0xe004c
-/* Per-transcoder DIP controls */
-
+/* Per-transcoder DIP controls (PCH) */
#define _VIDEO_DIP_CTL_A 0xe0200
#define _VIDEO_DIP_DATA_A 0xe0208
#define _VIDEO_DIP_GCP_A 0xe0210
@@ -4890,6 +4951,7 @@ enum punit_power_well {
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
+/* Per-transcoder DIP controls (VLV) */
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
@@ -4898,12 +4960,19 @@ enum punit_power_well {
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
+#define CHV_VIDEO_DIP_CTL_C (VLV_DISPLAY_BASE + 0x611f0)
+#define CHV_VIDEO_DIP_DATA_C (VLV_DISPLAY_BASE + 0x611f4)
+#define CHV_VIDEO_DIP_GDCP_PAYLOAD_C (VLV_DISPLAY_BASE + 0x611f8)
+
#define VLV_TVIDEO_DIP_CTL(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_CTL_A, \
+ VLV_VIDEO_DIP_CTL_B, CHV_VIDEO_DIP_CTL_C)
#define VLV_TVIDEO_DIP_DATA(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_DATA_A, \
+ VLV_VIDEO_DIP_DATA_B, CHV_VIDEO_DIP_DATA_C)
#define VLV_TVIDEO_DIP_GCP(pipe) \
- _PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
+ _PIPE3((pipe), VLV_VIDEO_DIP_GDCP_PAYLOAD_A, \
+ VLV_VIDEO_DIP_GDCP_PAYLOAD_B, CHV_VIDEO_DIP_GDCP_PAYLOAD_C)
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
@@ -5334,6 +5403,7 @@ enum punit_power_well {
#define VLV_GTLC_ALLOWWAKEERR (1 << 1)
#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
+#define VLV_GTLC_SURVIVABILITY_REG 0x130098
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
@@ -5471,6 +5541,12 @@ enum punit_power_well {
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
+#define CHV_CZ_CLOCK_FREQ_MODE_200 200
+#define CHV_CZ_CLOCK_FREQ_MODE_267 267
+#define CHV_CZ_CLOCK_FREQ_MODE_320 320
+#define CHV_CZ_CLOCK_FREQ_MODE_333 333
+#define CHV_CZ_CLOCK_FREQ_MODE_400 400
+
#define GEN7_GT_SCRATCH_BASE 0x4F100
#define GEN7_GT_SCRATCH_REG_NUM 8
@@ -5481,6 +5557,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define VLV_COUNTER_CONTROL 0x138104
#define VLV_COUNT_RANGE_HIGH (1<<15)
+#define VLV_MEDIA_RC0_COUNT_EN (1<<5)
+#define VLV_RENDER_RC0_COUNT_EN (1<<4)
#define VLV_MEDIA_RC6_COUNT_EN (1<<1)
#define VLV_RENDER_RC6_COUNT_EN (1<<0)
#define GEN6_GT_GFX_RC6 0x138108
@@ -5489,6 +5567,8 @@ enum punit_power_well {
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
+#define VLV_RENDER_C0_COUNT_REG 0x138118
+#define VLV_MEDIA_C0_COUNT_REG 0x13811C
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
@@ -5723,6 +5803,7 @@ enum punit_power_well {
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define TRANS_DDI_PORT_MASK (7<<28)
+#define TRANS_DDI_PORT_SHIFT 28
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
@@ -5743,6 +5824,7 @@ enum punit_power_well {
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
+#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
#define TRANS_DDI_BFI_ENABLE (1<<4)
/* DisplayPort Transport Control */
@@ -5752,6 +5834,7 @@ enum punit_power_well {
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
+#define DP_TP_CTL_FORCE_ACT (1<<25)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
@@ -5766,15 +5849,19 @@ enum punit_power_well {
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
-#define DP_TP_STATUS_IDLE_DONE (1<<25)
-#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_IDLE_DONE (1<<25)
+#define DP_TP_STATUS_ACT_SENT (1<<24)
+#define DP_TP_STATUS_MODE_STATUS_MST (1<<23)
+#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC2 (3 << 8)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC1 (3 << 4)
+#define DP_TP_STATUS_PAYLOAD_MAPPING_VC0 (3 << 0)
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
-/* Haswell */
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
@@ -5784,16 +5871,6 @@ enum punit_power_well {
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
-/* Broadwell */
-#define DDI_BUF_EMP_400MV_0DB_BDW (0<<24) /* Sel0 */
-#define DDI_BUF_EMP_400MV_3_5DB_BDW (1<<24) /* Sel1 */
-#define DDI_BUF_EMP_400MV_6DB_BDW (2<<24) /* Sel2 */
-#define DDI_BUF_EMP_600MV_0DB_BDW (3<<24) /* Sel3 */
-#define DDI_BUF_EMP_600MV_3_5DB_BDW (4<<24) /* Sel4 */
-#define DDI_BUF_EMP_600MV_6DB_BDW (5<<24) /* Sel5 */
-#define DDI_BUF_EMP_800MV_0DB_BDW (6<<24) /* Sel6 */
-#define DDI_BUF_EMP_800MV_3_5DB_BDW (7<<24) /* Sel7 */
-#define DDI_BUF_EMP_1200MV_0DB_BDW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
@@ -5861,10 +5938,12 @@ enum punit_power_well {
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
+#define WRPLL_CTL(pll) ((pll) == 0 ? WRPLL_CTL1 : WRPLL_CTL2)
#define WRPLL_PLL_ENABLE (1<<31)
-#define WRPLL_PLL_SELECT_SSC (0x01<<28)
-#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
-#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
+#define WRPLL_PLL_SSC (1<<28)
+#define WRPLL_PLL_NON_SSC (2<<28)
+#define WRPLL_PLL_LCPLL (3<<28)
+#define WRPLL_PLL_REF_MASK (3<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_REF_MASK (0xff)
@@ -5883,6 +5962,7 @@ enum punit_power_well {
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
+#define PORT_CLK_SEL_WRPLL(pll) (((pll)+4)<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
@@ -5924,7 +6004,10 @@ enum punit_power_well {
#define LCPLL_CD_SOURCE_FCLK (1<<21)
#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
-#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
+ * since on HSW we can't write to it using I915_WRITE. */
+#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_BDW 0x138144
#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
#define D_COMP_COMP_FORCE (1<<8)
#define D_COMP_COMP_DISABLE (1<<0)
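A sketch of the split the comment above implies: on HSW the register is only reachable through the pcode mailbox, on BDW it is plain MMIO. The mailbox command and helper shape are assumptions about how hsw_write_dcomp() works — the real helpers are outside this diff:

/* Hedged sketch; sandybridge_pcode_write() usage and the mailbox
 * command are assumptions, not code from this patch. */
static void example_write_dcomp(struct drm_device *dev, uint32_t val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev)) {
		/* HSW: D_COMP sits behind the punit mailbox, not MMIO */
		if (sandybridge_pcode_write(dev_priv,
					    GEN6_PCODE_WRITE_D_COMP, val))
			DRM_ERROR("Failed to write to D_COMP\n");
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}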
@@ -6005,7 +6088,8 @@ enum punit_power_well {
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
-#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define MIPI_PORT_CTRL(tc) _TRANSCODER(tc, _MIPIA_PORT_CTRL, \
+ _MIPIB_PORT_CTRL)
#define DPI_ENABLE (1 << 31) /* A + B */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -6047,18 +6131,20 @@ enum punit_power_well {
#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194)
#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704)
-#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define MIPI_TEARING_CTRL(tc) _TRANSCODER(tc, \
+ _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
#define TEARING_EFFECT_DELAY_SHIFT 0
#define TEARING_EFFECT_DELAY_MASK (0xffff << 0)
/* XXX: all bits reserved */
-#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
+#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0)
/* MIPI DSI Controller and D-PHY registers */
-#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000)
-#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800)
-#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define _MIPIA_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb000)
+#define _MIPIB_DEVICE_READY (dev_priv->mipi_mmio_base + 0xb800)
+#define MIPI_DEVICE_READY(tc) _TRANSCODER(tc, _MIPIA_DEVICE_READY, \
+ _MIPIB_DEVICE_READY)
#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */
#define ULPS_STATE_MASK (3 << 1)
#define ULPS_STATE_ENTER (2 << 1)
@@ -6066,12 +6152,14 @@ enum punit_power_well {
#define ULPS_STATE_NORMAL_OPERATION (0 << 1)
#define DEVICE_READY (1 << 0)
-#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004)
-#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804)
-#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
-#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008)
-#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808)
-#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define _MIPIA_INTR_STAT (dev_priv->mipi_mmio_base + 0xb004)
+#define _MIPIB_INTR_STAT (dev_priv->mipi_mmio_base + 0xb804)
+#define MIPI_INTR_STAT(tc) _TRANSCODER(tc, _MIPIA_INTR_STAT, \
+ _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN (dev_priv->mipi_mmio_base + 0xb008)
+#define _MIPIB_INTR_EN (dev_priv->mipi_mmio_base + 0xb808)
+#define MIPI_INTR_EN(tc) _TRANSCODER(tc, _MIPIA_INTR_EN, \
+ _MIPIB_INTR_EN)
#define TEARING_EFFECT (1 << 31)
#define SPL_PKT_SENT_INTERRUPT (1 << 30)
#define GEN_READ_DATA_AVAIL (1 << 29)
@@ -6105,9 +6193,10 @@ enum punit_power_well {
#define RXSOT_SYNC_ERROR (1 << 1)
#define RXSOT_ERROR (1 << 0)
-#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c)
-#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c)
-#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define _MIPIA_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG (dev_priv->mipi_mmio_base + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(tc) _TRANSCODER(tc, _MIPIA_DSI_FUNC_PRG, \
+ _MIPIB_DSI_FUNC_PRG)
#define CMD_MODE_DATA_WIDTH_MASK (7 << 13)
#define CMD_MODE_NOT_SUPPORTED (0 << 13)
#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13)
@@ -6128,78 +6217,94 @@ enum punit_power_well {
#define DATA_LANES_PRG_REG_SHIFT 0
#define DATA_LANES_PRG_REG_MASK (7 << 0)
-#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010)
-#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810)
-#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define _MIPIA_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_HS_TX_TIMEOUT, \
+ _MIPIB_HS_TX_TIMEOUT)
#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014)
-#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814)
-#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define _MIPIA_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT (dev_priv->mipi_mmio_base + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(tc) _TRANSCODER(tc, _MIPIA_LP_RX_TIMEOUT, \
+ _MIPIB_LP_RX_TIMEOUT)
#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff
-#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018)
-#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818)
-#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define _MIPIA_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT (dev_priv->mipi_mmio_base + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(tc) _TRANSCODER(tc, \
+ _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
#define TURN_AROUND_TIMEOUT_MASK 0x3f
-#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c)
-#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c)
-#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define _MIPIA_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER (dev_priv->mipi_mmio_base + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(tc) _TRANSCODER(tc, \
+ _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
#define DEVICE_RESET_TIMER_MASK 0xffff
-#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020)
-#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820)
-#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define _MIPIA_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb020)
+#define _MIPIB_DPI_RESOLUTION (dev_priv->mipi_mmio_base + 0xb820)
+#define MIPI_DPI_RESOLUTION(tc) _TRANSCODER(tc, _MIPIA_DPI_RESOLUTION, \
+ _MIPIB_DPI_RESOLUTION)
#define VERTICAL_ADDRESS_SHIFT 16
#define VERTICAL_ADDRESS_MASK (0xffff << 16)
#define HORIZONTAL_ADDRESS_SHIFT 0
#define HORIZONTAL_ADDRESS_MASK 0xffff
-#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024)
-#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824)
-#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define _MIPIA_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE (dev_priv->mipi_mmio_base + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(tc) _TRANSCODER(tc, \
+ _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
#define DBI_FIFO_EMPTY_HALF (0 << 0)
#define DBI_FIFO_EMPTY_QUARTER (1 << 0)
#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0)
/* regs below are bits 15:0 */
-#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028)
-#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828)
-#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
-
-#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c)
-#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c)
-#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
-
-#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030)
-#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830)
-#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
-
-#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034)
-#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834)
-#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
-
-#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038)
-#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838)
-#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+#define _MIPIA_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb02c)
+#define _MIPIB_HBP_COUNT (dev_priv->mipi_mmio_base + 0xb82c)
+#define MIPI_HBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HBP_COUNT, \
+ _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb030)
+#define _MIPIB_HFP_COUNT (dev_priv->mipi_mmio_base + 0xb830)
+#define MIPI_HFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_HFP_COUNT, \
+ _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT (dev_priv->mipi_mmio_base + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT (dev_priv->mipi_mmio_base + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb03c)
+#define _MIPIB_VBP_COUNT (dev_priv->mipi_mmio_base + 0xb83c)
+#define MIPI_VBP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VBP_COUNT, \
+ _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb040)
+#define _MIPIB_VFP_COUNT (dev_priv->mipi_mmio_base + 0xb840)
+#define MIPI_VFP_COUNT(tc) _TRANSCODER(tc, _MIPIA_VFP_COUNT, \
+ _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT (dev_priv->mipi_mmio_base + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(tc) _TRANSCODER(tc, \
+ _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
-#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c)
-#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c)
-#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
-
-#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040)
-#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840)
-#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
-
-#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044)
-#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844)
-#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
/* regs above are bits 15:0 */
-#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048)
-#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848)
-#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define _MIPIA_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb048)
+#define _MIPIB_DPI_CONTROL (dev_priv->mipi_mmio_base + 0xb848)
+#define MIPI_DPI_CONTROL(tc) _TRANSCODER(tc, _MIPIA_DPI_CONTROL, \
+ _MIPIB_DPI_CONTROL)
#define DPI_LP_MODE (1 << 6)
#define BACKLIGHT_OFF (1 << 5)
#define BACKLIGHT_ON (1 << 4)
@@ -6208,27 +6313,31 @@ enum punit_power_well {
#define TURN_ON (1 << 1)
#define SHUTDOWN (1 << 0)
-#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c)
-#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c)
-#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define _MIPIA_DPI_DATA (dev_priv->mipi_mmio_base + 0xb04c)
+#define _MIPIB_DPI_DATA (dev_priv->mipi_mmio_base + 0xb84c)
+#define MIPI_DPI_DATA(tc) _TRANSCODER(tc, _MIPIA_DPI_DATA, \
+ _MIPIB_DPI_DATA)
#define COMMAND_BYTE_SHIFT 0
#define COMMAND_BYTE_MASK (0x3f << 0)
-#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050)
-#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850)
-#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define _MIPIA_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb050)
+#define _MIPIB_INIT_COUNT (dev_priv->mipi_mmio_base + 0xb850)
+#define MIPI_INIT_COUNT(tc) _TRANSCODER(tc, _MIPIA_INIT_COUNT, \
+ _MIPIB_INIT_COUNT)
#define MASTER_INIT_TIMER_SHIFT 0
#define MASTER_INIT_TIMER_MASK (0xffff << 0)
-#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054)
-#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854)
-#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define _MIPIA_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE (dev_priv->mipi_mmio_base + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(tc) _TRANSCODER(tc, \
+ _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
#define MAX_RETURN_PKT_SIZE_SHIFT 0
#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0)
-#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058)
-#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858)
-#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define _MIPIA_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT (dev_priv->mipi_mmio_base + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(tc) _TRANSCODER(tc, \
+ _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4)
#define DISABLE_VIDEO_BTA (1 << 3)
#define IP_TG_CONFIG (1 << 2)
@@ -6236,9 +6345,10 @@ enum punit_power_well {
#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0)
#define VIDEO_MODE_BURST (3 << 0)
-#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c)
-#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c)
-#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
+#define _MIPIB_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
+#define MIPI_EOT_DISABLE(tc) _TRANSCODER(tc, _MIPIA_EOT_DISABLE, \
+ _MIPIB_EOT_DISABLE)
#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
@@ -6248,28 +6358,33 @@ enum punit_power_well {
#define CLOCKSTOP (1 << 1)
#define EOT_DISABLE (1 << 0)
-#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060)
-#define _MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860)
-#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define _MIPIA_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb060)
+#define _MIPIB_LP_BYTECLK (dev_priv->mipi_mmio_base + 0xb860)
+#define MIPI_LP_BYTECLK(tc) _TRANSCODER(tc, _MIPIA_LP_BYTECLK, \
+ _MIPIB_LP_BYTECLK)
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
/* bits 31:0 */
-#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064)
-#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864)
-#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
+#define _MIPIB_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
+#define MIPI_LP_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_DATA, \
+ _MIPIB_LP_GEN_DATA)
/* bits 31:0 */
-#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068)
-#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868)
-#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
-
-#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c)
-#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c)
-#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
-#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070)
-#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870)
-#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define _MIPIA_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb068)
+#define _MIPIB_HS_GEN_DATA (dev_priv->mipi_mmio_base + 0xb868)
+#define MIPI_HS_GEN_DATA(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_DATA, \
+ _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb86c)
+#define MIPI_LP_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_LP_GEN_CTRL, \
+ _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb070)
+#define _MIPIB_HS_GEN_CTRL (dev_priv->mipi_mmio_base + 0xb870)
+#define MIPI_HS_GEN_CTRL(tc) _TRANSCODER(tc, _MIPIA_HS_GEN_CTRL, \
+ _MIPIB_HS_GEN_CTRL)
#define LONG_PACKET_WORD_COUNT_SHIFT 8
#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8)
#define SHORT_PACKET_PARAM_SHIFT 8
@@ -6280,9 +6395,10 @@ enum punit_power_well {
#define DATA_TYPE_MASK (0x3f << 0)
/* data type values, see include/video/mipi_display.h */
-#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074)
-#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874)
-#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define _MIPIA_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT (dev_priv->mipi_mmio_base + 0xb874)
+#define MIPI_GEN_FIFO_STAT(tc) _TRANSCODER(tc, _MIPIA_GEN_FIFO_STAT, \
+ _MIPIB_GEN_FIFO_STAT)
#define DPI_FIFO_EMPTY (1 << 28)
#define DBI_FIFO_EMPTY (1 << 27)
#define LP_CTRL_FIFO_EMPTY (1 << 26)
@@ -6298,16 +6414,18 @@ enum punit_power_well {
#define HS_DATA_FIFO_HALF_EMPTY (1 << 1)
#define HS_DATA_FIFO_FULL (1 << 0)
-#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078)
-#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878)
-#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define _MIPIA_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE (dev_priv->mipi_mmio_base + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(tc) _TRANSCODER(tc, \
+ _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
#define DBI_HS_LP_MODE_MASK (1 << 0)
#define DBI_LP_MODE (1 << 0)
#define DBI_HS_MODE (0 << 0)
-#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080)
-#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880)
-#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define _MIPIA_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb080)
+#define _MIPIB_DPHY_PARAM (dev_priv->mipi_mmio_base + 0xb880)
+#define MIPI_DPHY_PARAM(tc) _TRANSCODER(tc, _MIPIA_DPHY_PARAM, \
+ _MIPIB_DPHY_PARAM)
#define EXIT_ZERO_COUNT_SHIFT 24
#define EXIT_ZERO_COUNT_MASK (0x3f << 24)
#define TRAIL_COUNT_SHIFT 16
@@ -6318,34 +6436,41 @@ enum punit_power_well {
#define PREPARE_COUNT_MASK (0x3f << 0)
/* bits 31:0 */
-#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084)
-#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884)
-#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
-
-#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088)
-#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888)
-#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
+#define _MIPIB_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
+#define MIPI_DBI_BW_CTRL(tc) _TRANSCODER(tc, _MIPIA_DBI_BW_CTRL, \
+ _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (dev_priv->mipi_mmio_base \
+ + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(tc) _TRANSCODER(tc, \
+ _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
#define LP_HS_SSW_CNT_SHIFT 16
#define LP_HS_SSW_CNT_MASK (0xffff << 16)
#define HS_LP_PWR_SW_CNT_SHIFT 0
#define HS_LP_PWR_SW_CNT_MASK (0xffff << 0)
-#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c)
-#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c)
-#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define _MIPIA_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL (dev_priv->mipi_mmio_base + 0xb88c)
+#define MIPI_STOP_STATE_STALL(tc) _TRANSCODER(tc, \
+ _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
#define STOP_STATE_STALL_COUNTER_SHIFT 0
#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0)
-#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090)
-#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890)
-#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
-#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094)
-#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894)
-#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define _MIPIA_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1 (dev_priv->mipi_mmio_base + 0xb890)
+#define MIPI_INTR_STAT_REG_1(tc) _TRANSCODER(tc, \
+ _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb094)
+#define _MIPIB_INTR_EN_REG_1 (dev_priv->mipi_mmio_base + 0xb894)
+#define MIPI_INTR_EN_REG_1(tc) _TRANSCODER(tc, _MIPIA_INTR_EN_REG_1, \
+ _MIPIB_INTR_EN_REG_1)
#define RX_CONTENTION_DETECTED (1 << 0)
/* XXX: only pipe A ?!? */
-#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100)
+#define MIPIA_DBI_TYPEC_CTRL (dev_priv->mipi_mmio_base + 0xb100)
#define DBI_TYPEC_ENABLE (1 << 31)
#define DBI_TYPEC_WIP (1 << 30)
#define DBI_TYPEC_OPTION_SHIFT 28
@@ -6359,9 +6484,10 @@ enum punit_power_well {
/* MIPI adapter registers */
-#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104)
-#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904)
-#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define _MIPIA_CTRL (dev_priv->mipi_mmio_base + 0xb104)
+#define _MIPIB_CTRL (dev_priv->mipi_mmio_base + 0xb904)
+#define MIPI_CTRL(tc) _TRANSCODER(tc, _MIPIA_CTRL, \
+ _MIPIB_CTRL)
#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */
#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5)
#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5)
@@ -6373,50 +6499,52 @@ enum punit_power_well {
#define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2)
-#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108)
-#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908)
-#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
+#define _MIPIB_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
+#define MIPI_DATA_ADDRESS(tc) _TRANSCODER(tc, _MIPIA_DATA_ADDRESS, \
+ _MIPIB_DATA_ADDRESS)
#define DATA_MEM_ADDRESS_SHIFT 5
#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define DATA_VALID (1 << 0)
-#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c)
-#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c)
-#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define _MIPIA_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb10c)
+#define _MIPIB_DATA_LENGTH (dev_priv->mipi_mmio_base + 0xb90c)
+#define MIPI_DATA_LENGTH(tc) _TRANSCODER(tc, _MIPIA_DATA_LENGTH, \
+ _MIPIB_DATA_LENGTH)
#define DATA_LENGTH_SHIFT 0
#define DATA_LENGTH_MASK (0xfffff << 0)
-#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110)
-#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910)
-#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define _MIPIA_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS (dev_priv->mipi_mmio_base + 0xb910)
+#define MIPI_COMMAND_ADDRESS(tc) _TRANSCODER(tc, \
+ _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
#define COMMAND_MEM_ADDRESS_SHIFT 5
#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5)
#define AUTO_PWG_ENABLE (1 << 2)
#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1)
#define COMMAND_VALID (1 << 0)
-#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114)
-#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914)
-#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define _MIPIA_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb114)
+#define _MIPIB_COMMAND_LENGTH (dev_priv->mipi_mmio_base + 0xb914)
+#define MIPI_COMMAND_LENGTH(tc) _TRANSCODER(tc, _MIPIA_COMMAND_LENGTH, \
+ _MIPIB_COMMAND_LENGTH)
#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */
#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n)))
-#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118)
-#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918)
-#define MIPI_READ_DATA_RETURN(pipe, n) \
- (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+#define _MIPIA_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0 (dev_priv->mipi_mmio_base + 0xb918)
+#define MIPI_READ_DATA_RETURN(tc, n) \
+ (_TRANSCODER(tc, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) \
+ + 4 * (n)) /* n: 0...7 */
-#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138)
-#define _MIPIB_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb938)
-#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define _MIPIA_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb138)
+#define _MIPIB_READ_DATA_VALID (dev_priv->mipi_mmio_base + 0xb938)
+#define MIPI_READ_DATA_VALID(tc) _TRANSCODER(tc, \
+ _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
/* For UMS only (deprecated): */
#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 86ce39aad0ff..ae7fd8fc27f0 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -47,22 +47,45 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
intel_runtime_pm_get(dev_priv);
- /* On VLV, residency time is in CZ units rather than 1.28us */
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev)) {
- u32 clkctl2;
+ u32 reg, czcount_30ns;
- clkctl2 = I915_READ(VLV_CLK_CTL2) >>
- CLK_CTL2_CZCOUNT_30NS_SHIFT;
- if (!clkctl2) {
- WARN(!clkctl2, "bogus CZ count value");
+ if (IS_CHERRYVIEW(dev))
+ reg = CHV_CLK_CTL1;
+ else
+ reg = VLV_CLK_CTL2;
+
+ czcount_30ns = I915_READ(reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
+
+ if (!czcount_30ns) {
+ WARN(!czcount_30ns, "bogus CZ count value");
ret = 0;
goto out;
}
- units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+
+ units = 0;
+ div = 1000000ULL;
+
+ if (IS_CHERRYVIEW(dev)) {
+ /* Special case for 320Mhz */
+ if (czcount_30ns == 1) {
+ div = 10000000ULL;
+ units = 3125ULL;
+ } else {
+ /* chv counts are one less */
+ czcount_30ns += 1;
+ }
+ }
+
+ if (units == 0)
+ units = DIV_ROUND_UP_ULL(30ULL * bias,
+ (u64)czcount_30ns);
+
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
- div = 1000000ULL * bias;
+ div = div * bias;
}
raw_time = I915_READ(reg) * units;
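The reworked VLV/CHV branch above derives the residency scale from the CZ count: CHV's 320 MHz case gets fixed 3.125 ns units, otherwise the count (off by one on CHV) divides into 30 ns worth of bias. A sketch of just that unit selection (bias and count values are stand-ins; DIV_ROUND_UP expanded by hand):

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper mirroring the VLV/CHV CZ-unit math; no MMIO */
static void cz_units(int is_chv, uint64_t czcount_30ns, uint64_t bias,
		     uint64_t *units, uint64_t *div)
{
	*units = 0;
	*div = 1000000ULL;

	if (is_chv && czcount_30ns == 1) {
		/* 320 MHz: one CZ tick is 3.125 ns */
		*div = 10000000ULL;
		*units = 3125ULL;
	} else {
		if (is_chv)
			czcount_30ns += 1;	/* CHV counts are one less */
		/* DIV_ROUND_UP_ULL(30 * bias, czcount_30ns) */
		*units = (30ULL * bias + czcount_30ns - 1) / czcount_30ns;
	}
	*div *= bias;
}

int main(void)
{
	uint64_t u, d;

	cz_units(1, 1, 100, &u, &d);	/* CHV at 320 MHz, made-up bias */
	printf("units=%llu div=%llu\n",
	       (unsigned long long)u, (unsigned long long)d);
	return 0;
}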
@@ -461,11 +484,20 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
mutex_unlock(&dev->struct_mutex);
if (attr == &dev_attr_gt_RP0_freq_mhz) {
- val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+ else
+ val = ((rp_state_cap & 0x0000ff) >> 0) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RP1_freq_mhz) {
- val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+ else
+ val = ((rp_state_cap & 0x00ff00) >> 8) * GT_FREQUENCY_MULTIPLIER;
} else if (attr == &dev_attr_gt_RPn_freq_mhz) {
- val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
+ if (IS_VALLEYVIEW(dev))
+ val = vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+ else
+ val = ((rp_state_cap & 0xff0000) >> 16) * GT_FREQUENCY_MULTIPLIER;
} else {
BUG();
}
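On non-VLV parts the hunk above keeps reading RP0/RP1/RPn out of successive bytes of rp_state_cap. A sketch of that field extraction; the 50 MHz step is an assumption matching GT_FREQUENCY_MULTIPLIER's usual value:

#include <stdint.h>
#include <stdio.h>

#define FAKE_GT_FREQUENCY_MULTIPLIER 50	/* assumed 50 MHz per unit */

static unsigned rp_field_mhz(uint32_t rp_state_cap, int byte)
{
	return ((rp_state_cap >> (8 * byte)) & 0xff) *
	       FAKE_GT_FREQUENCY_MULTIPLIER;
}

int main(void)
{
	uint32_t cap = 0x0b1118;	/* hypothetical: RPn=0x0b RP1=0x11 RP0=0x18 */

	printf("RP0=%u RP1=%u RPn=%u MHz\n",
	       rp_field_mhz(cap, 0), rp_field_mhz(cap, 1),
	       rp_field_mhz(cap, 2));
	return 0;
}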
@@ -486,6 +518,9 @@ static const struct attribute *vlv_attrs[] = {
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_max_freq_mhz.attr,
&dev_attr_gt_min_freq_mhz.attr,
+ &dev_attr_gt_RP0_freq_mhz.attr,
+ &dev_attr_gt_RP1_freq_mhz.attr,
+ &dev_attr_gt_RPn_freq_mhz.attr,
&dev_attr_vlv_rpe_freq_mhz.attr,
NULL,
};
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 827498e081df..608ed302f24d 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -336,11 +336,12 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
+ dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
"active %s, min brightness %u, level %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- entry->min_brightness,
+ dev_priv->vbt.backlight.min_brightness,
backlight_data->level[panel_type]);
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 5a045d3bd77e..2efaf8e8d9c4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -137,6 +137,18 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
}
+static void hsw_crt_pre_enable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
+ I915_WRITE(SPLL_CTL,
+ SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
+ POSTING_READ(SPLL_CTL);
+ udelay(20);
+}
+
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -194,6 +206,20 @@ static void intel_disable_crt(struct intel_encoder *encoder)
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
+
+static void hsw_crt_post_disable(struct intel_encoder *encoder)
+{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t val;
+
+ DRM_DEBUG_KMS("Disabling SPLL\n");
+ val = I915_READ(SPLL_CTL);
+ WARN_ON(!(val & SPLL_PLL_ENABLE));
+ I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ POSTING_READ(SPLL_CTL);
+}
+
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -289,8 +315,10 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = 24;
/* FDI must always be 2.7 GHz */
- if (HAS_DDI(dev))
+ if (HAS_DDI(dev)) {
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
pipe_config->port_clock = 135000 * 2;
+ }
return true;
}
@@ -632,8 +660,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
- intel_runtime_pm_get(dev_priv);
-
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
connector->base.id, connector->name,
force);
@@ -685,8 +711,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -860,6 +884,8 @@ void intel_crt_init(struct drm_device *dev)
if (HAS_DDI(dev)) {
crt->base.get_config = hsw_crt_get_config;
crt->base.get_hw_state = intel_ddi_get_hw_state;
+ crt->base.pre_enable = hsw_crt_pre_enable;
+ crt->base.post_disable = hsw_crt_post_disable;
} else {
crt->base.get_config = intel_crt_get_config;
crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -869,7 +895,7 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (!I915_HAS_HOTPLUG(dev))
intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b17b9c7c769f..5db0b5552e39 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -76,12 +76,12 @@ static const u32 bdw_ddi_translations_edp[] = {
0x00FFFFFF, 0x00000012, /* eDP parameters */
0x00EBAFFF, 0x00020011,
0x00C71FFF, 0x0006000F,
+ 0x00AAAFFF, 0x000E000A,
0x00FFFFFF, 0x00020011,
0x00DB6FFF, 0x0005000F,
0x00BEEFFF, 0x000A000C,
0x00FFFFFF, 0x0005000F,
0x00DB6FFF, 0x000A000C,
- 0x00FFFFFF, 0x000A000C,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
@@ -89,12 +89,12 @@ static const u32 bdw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0007000E, /* DP parameters */
0x00D75FFF, 0x000E000A,
0x00BEFFFF, 0x00140006,
+ 0x80B2CFFF, 0x001B0002,
0x00FFFFFF, 0x000E000A,
0x00D75FFF, 0x00180004,
0x80CB2FFF, 0x001B0002,
0x00F7DFFF, 0x00180004,
0x80D75FFF, 0x001B0002,
- 0x80FFFFFF, 0x001B0002,
0x00FFFFFF, 0x00140006 /* HDMI parameters 800mV 0dB */
};
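The two Broadwell tables above store u32 register pairs, one pair per voltage-swing/pre-emphasis entry (the patch inserts the missing entry and drops the one past the end). A sketch of the pairing with a made-up lookup helper; the 2-words-per-entry layout is the only thing taken from the tables:

#include <stdint.h>
#include <stdio.h>

static void get_translation(const uint32_t *table, int level,
			    uint32_t *trans1, uint32_t *trans2)
{
	*trans1 = table[2 * level];	/* first word of the entry */
	*trans2 = table[2 * level + 1];	/* second word of the entry */
}

int main(void)
{
	static const uint32_t demo[] = { 0x00FFFFFF, 0x0007000E,
					 0x00D75FFF, 0x000E000A };
	uint32_t t1, t2;

	get_translation(demo, 1, &t1, &t2);
	printf("level 1: 0x%08X 0x%08X\n", (unsigned)t1, (unsigned)t2);
	return 0;
}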
@@ -116,7 +116,10 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+ if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_digital_port *intel_dig_port = enc_to_mst(encoder)->primary;
+ return intel_dig_port->port;
+ } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
@@ -277,7 +280,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
/* Configure Port Clock Select */
- I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
+ I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config.ddi_pll_sel);
+ WARN_ON(intel_crtc->config.ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
@@ -364,6 +368,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port =
+ enc_to_dig_port(&encoder->base);
+
+ intel_dp->DP = intel_dig_port->saved_port_bits |
+ DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
+ intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+}
+
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
@@ -385,53 +401,6 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
return ret;
}
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t val;
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- plls->spll_refcount--;
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Disabling SPLL\n");
- val = I915_READ(SPLL_CTL);
- WARN_ON(!(val & SPLL_PLL_ENABLE));
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
- }
- break;
- case PORT_CLK_SEL_WRPLL1:
- plls->wrpll1_refcount--;
- if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 1\n");
- val = I915_READ(WRPLL_CTL1);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL1);
- }
- break;
- case PORT_CLK_SEL_WRPLL2:
- plls->wrpll2_refcount--;
- if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Disabling WRPLL 2\n");
- val = I915_READ(WRPLL_CTL2);
- WARN_ON(!(val & WRPLL_PLL_ENABLE));
- I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL2);
- }
- break;
- }
-
- WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
- WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
- WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
-
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
-}
-
#define LC_FREQ 2700
#define LC_FREQ_2K (LC_FREQ * 2000)
@@ -592,9 +561,9 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
u32 wrpll;
wrpll = I915_READ(reg);
- switch (wrpll & SPLL_PLL_REF_MASK) {
- case SPLL_PLL_SSC:
- case SPLL_PLL_NON_SSC:
+ switch (wrpll & WRPLL_PLL_REF_MASK) {
+ case WRPLL_PLL_SSC:
+ case WRPLL_PLL_NON_SSC:
/*
* We could calculate spread here, but our checking
* code only cares about 5% accuracy, and spread is a max of
@@ -602,7 +571,7 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
*/
refclk = 135;
break;
- case SPLL_PLL_LCPLL:
+ case WRPLL_PLL_LCPLL:
refclk = LC_FREQ;
break;
default:
@@ -618,15 +587,14 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
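The readback path above now matches on the WRPLL_PLL_* reference selects instead of the SPLL_* values it wrongly used, then computes the link rate from the divider fields. A sketch of just the arithmetic; the divider values are invented and the 100 factor is kept at face value from the code:

#include <stdio.h>

static int wrpll_link(int refclk, int n, int p, int r)
{
	/* same shape as the driver's (refclk * n * 100) / (p * r) */
	return (refclk * n * 100) / (p * r);
}

int main(void)
{
	/* e.g. 135 MHz SSC reference with hypothetical n/p/r */
	printf("link clock = %d\n", wrpll_link(135, 40, 2, 10));
	return 0;
}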
-static void intel_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_config *pipe_config)
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- enum port port = intel_ddi_get_encoder_port(encoder);
int link_clock = 0;
u32 val, pll;
- val = I915_READ(PORT_CLK_SEL(port));
+ val = pipe_config->ddi_pll_sel;
switch (val & PORT_CLK_SEL_MASK) {
case PORT_CLK_SEL_LCPLL_810:
link_clock = 81000;
@@ -750,173 +718,37 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = crtc->dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
- enum pipe pipe = intel_crtc->pipe;
int clock = intel_crtc->config.port_clock;
- intel_ddi_put_crtc_pll(crtc);
-
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_put_shared_dpll(intel_crtc);
- switch (intel_dp->link_bw) {
- case DP_LINK_BW_1_62:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
- break;
- case DP_LINK_BW_2_7:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
- break;
- case DP_LINK_BW_5_4:
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
- break;
- default:
- DRM_ERROR("Link bandwidth %d unsupported\n",
- intel_dp->link_bw);
- return false;
- }
-
- } else if (type == INTEL_OUTPUT_HDMI) {
- uint32_t reg, val;
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_shared_dpll *pll;
+ uint32_t val;
unsigned p, n2, r2;
intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
- val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
+ val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
- if (val == I915_READ(WRPLL_CTL1)) {
- DRM_DEBUG_KMS("Reusing WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (val == I915_READ(WRPLL_CTL2)) {
- DRM_DEBUG_KMS("Reusing WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else if (plls->wrpll1_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL1;
- } else if (plls->wrpll2_refcount == 0) {
- DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
- pipe_name(pipe));
- reg = WRPLL_CTL2;
- } else {
- DRM_ERROR("No WRPLLs available!\n");
- return false;
- }
-
- DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
- clock, p, n2, r2);
-
- if (reg == WRPLL_CTL1) {
- plls->wrpll1_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
- } else {
- plls->wrpll2_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
- }
+ intel_crtc->config.dpll_hw_state.wrpll = val;
- } else if (type == INTEL_OUTPUT_ANALOG) {
- if (plls->spll_refcount == 0) {
- DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
- pipe_name(pipe));
- plls->spll_refcount++;
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
- } else {
- DRM_ERROR("SPLL already in use\n");
+ pll = intel_get_shared_dpll(intel_crtc);
+ if (pll == NULL) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(intel_crtc->pipe));
return false;
}
- } else {
- WARN(1, "Invalid DDI encoder type %d\n", type);
- return false;
+ intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
}
return true;
}
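intel_ddi_pll_select() now goes through the shared-DPLL infrastructure instead of hand-rolled WRPLL refcounts. A toy model of the acquire path it relies on; intel_get_shared_dpll()'s real matching is assumed to work roughly like this, with fields collapsed to the minimum:

#include <stdint.h>
#include <stdio.h>

struct toy_pll { int id; int refcount; uint32_t wrpll; };

static struct toy_pll *toy_get_shared_dpll(struct toy_pll *plls, int n,
					   uint32_t wanted)
{
	int i;

	for (i = 0; i < n; i++)		/* first pass: share a matching PLL */
		if (plls[i].refcount && plls[i].wrpll == wanted) {
			plls[i].refcount++;
			return &plls[i];
		}
	for (i = 0; i < n; i++)		/* second pass: claim a free PLL */
		if (!plls[i].refcount) {
			plls[i].wrpll = wanted;
			plls[i].refcount = 1;
			return &plls[i];
		}
	return NULL;			/* all PLLs busy with other states */
}

int main(void)
{
	struct toy_pll plls[2] = { { 0, 0, 0 }, { 1, 0, 0 } };
	struct toy_pll *a = toy_get_shared_dpll(plls, 2, 0x1234);
	struct toy_pll *b = toy_get_shared_dpll(plls, 2, 0x1234);

	printf("same pll shared: %s\n", (a == b) ? "yes" : "no");
	return 0;
}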
-/*
- * To be called after intel_ddi_pll_select(). That one selects the PLL to be
- * used, this one actually enables the PLL.
- */
-void intel_ddi_pll_enable(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
- int clock = crtc->config.port_clock;
- uint32_t reg, cur_val, new_val;
- int refcount;
- const char *pll_name;
- uint32_t enable_bit = (1 << 31);
- unsigned int p, n2, r2;
-
- BUILD_BUG_ON(enable_bit != SPLL_PLL_ENABLE);
- BUILD_BUG_ON(enable_bit != WRPLL_PLL_ENABLE);
-
- switch (crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_LCPLL_2700:
- case PORT_CLK_SEL_LCPLL_1350:
- case PORT_CLK_SEL_LCPLL_810:
- /*
- * LCPLL should always be enabled at this point of the mode set
- * sequence, so nothing to do.
- */
- return;
-
- case PORT_CLK_SEL_SPLL:
- pll_name = "SPLL";
- reg = SPLL_CTL;
- refcount = plls->spll_refcount;
- new_val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz |
- SPLL_PLL_SSC;
- break;
-
- case PORT_CLK_SEL_WRPLL1:
- case PORT_CLK_SEL_WRPLL2:
- if (crtc->ddi_pll_sel == PORT_CLK_SEL_WRPLL1) {
- pll_name = "WRPLL1";
- reg = WRPLL_CTL1;
- refcount = plls->wrpll1_refcount;
- } else {
- pll_name = "WRPLL2";
- reg = WRPLL_CTL2;
- refcount = plls->wrpll2_refcount;
- }
-
- intel_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
-
- new_val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
- WRPLL_DIVIDER_REFERENCE(r2) |
- WRPLL_DIVIDER_FEEDBACK(n2) | WRPLL_DIVIDER_POST(p);
-
- break;
-
- case PORT_CLK_SEL_NONE:
- WARN(1, "Bad selected pll: PORT_CLK_SEL_NONE\n");
- return;
- default:
- WARN(1, "Bad selected pll: 0x%08x\n", crtc->ddi_pll_sel);
- return;
- }
-
- cur_val = I915_READ(reg);
-
- WARN(refcount < 1, "Bad %s refcount: %d\n", pll_name, refcount);
- if (refcount == 1) {
- WARN(cur_val & enable_bit, "%s already enabled\n", pll_name);
- I915_WRITE(reg, new_val);
- POSTING_READ(reg);
- udelay(20);
- } else {
- WARN((cur_val & enable_bit) == 0, "%s disabled\n", pll_name);
- }
-}
-
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
@@ -926,8 +758,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
int type = intel_encoder->type;
uint32_t temp;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
-
+ if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
+     type == INTEL_OUTPUT_DP_MST) {
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->config.pipe_bpp) {
case 18:
@@ -949,6 +780,21 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
}
}
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
+ uint32_t temp;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (state)
+ temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ else
+ temp &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+}
+
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -995,7 +841,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* eDP when not using the panel fitter, and when not
* using motion blur mitigation (which we don't
* support). */
- if (IS_HASWELL(dev) && intel_crtc->config.pch_pfit.enabled)
+ if (IS_HASWELL(dev) &&
+ (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1026,7 +874,19 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+ if (intel_dp->is_mst)
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
+
+ temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ } else if (type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp *intel_dp = &enc_to_mst(encoder)->primary->dp;
+
+ if (intel_dp->is_mst)
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DP_SST;
temp |= DDI_PORT_WIDTH(intel_dp->lane_count);
} else {
@@ -1043,7 +903,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
+	  TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
}
@@ -1082,8 +942,11 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
case TRANS_DDI_MODE_SELECT_DP_SST:
if (type == DRM_MODE_CONNECTOR_eDP)
return true;
- case TRANS_DDI_MODE_SELECT_DP_MST:
return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ case TRANS_DDI_MODE_SELECT_DP_MST:
+ /* If the transcoder is in MST state then the
+ * connector isn't connected. */
+ return false;
case TRANS_DDI_MODE_SELECT_FDI:
return (type == DRM_MODE_CONNECTOR_VGA);
@@ -1135,6 +998,9 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
if ((tmp & TRANS_DDI_PORT_MASK)
== TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
+ return false;
+
*pipe = i;
return true;
}
@@ -1146,76 +1012,6 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
return false;
}
-static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t temp, ret;
- enum port port = I915_MAX_PORTS;
- enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
- pipe);
- int i;
-
- if (cpu_transcoder == TRANSCODER_EDP) {
- port = PORT_A;
- } else {
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- temp &= TRANS_DDI_PORT_MASK;
-
- for (i = PORT_B; i <= PORT_E; i++)
- if (temp == TRANS_DDI_SELECT_PORT(i))
- port = i;
- }
-
- if (port == I915_MAX_PORTS) {
- WARN(1, "Pipe %c enabled on an unknown port\n",
- pipe_name(pipe));
- ret = PORT_CLK_SEL_NONE;
- } else {
- ret = I915_READ(PORT_CLK_SEL(port));
- DRM_DEBUG_KMS("Pipe %c connected to port %c using clock "
- "0x%08x\n", pipe_name(pipe), port_name(port),
- ret);
- }
-
- return ret;
-}
-
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe pipe;
- struct intel_crtc *intel_crtc;
-
- dev_priv->ddi_plls.spll_refcount = 0;
- dev_priv->ddi_plls.wrpll1_refcount = 0;
- dev_priv->ddi_plls.wrpll2_refcount = 0;
-
- for_each_pipe(pipe) {
- intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
-
- if (!intel_crtc->active) {
- intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
- continue;
- }
-
- intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
- pipe);
-
- switch (intel_crtc->ddi_pll_sel) {
- case PORT_CLK_SEL_SPLL:
- dev_priv->ddi_plls.spll_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL1:
- dev_priv->ddi_plls.wrpll1_refcount++;
- break;
- case PORT_CLK_SEL_WRPLL2:
- dev_priv->ddi_plls.wrpll2_refcount++;
- break;
- }
- }
-}
-
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
@@ -1261,17 +1057,13 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
intel_edp_panel_on(intel_dp);
}
- WARN_ON(crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
- I915_WRITE(PORT_CLK_SEL(port), crtc->ddi_pll_sel);
+ WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+ I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
- intel_dp->DP = intel_dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
- intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
+ intel_ddi_init_dp_buf_reg(intel_encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
@@ -1418,10 +1210,60 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
}
}
+static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll);
+ POSTING_READ(WRPLL_CTL(pll->id));
+ udelay(20);
+}
+
+static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ uint32_t val;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE);
+ POSTING_READ(WRPLL_CTL(pll->id));
+}
+
+static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ uint32_t val;
+
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
+ val = I915_READ(WRPLL_CTL(pll->id));
+ hw_state->wrpll = val;
+
+ return val & WRPLL_PLL_ENABLE;
+}
+
+static const char * const hsw_ddi_pll_names[] = {
+ "WRPLL 1",
+ "WRPLL 2",
+};
+
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val = I915_READ(LCPLL_CTL);
+ int i;
+
+ dev_priv->num_shared_dpll = 2;
+
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ dev_priv->shared_dplls[i].id = i;
+ dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+ dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
+ dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+ dev_priv->shared_dplls[i].get_hw_state =
+ hsw_ddi_pll_get_hw_state;
+ }
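intel_ddi_pll_init() above fills in a small ops table per shared PLL. A stand-alone sketch of that wiring pattern, with the types collapsed to the minimum (sketch_pll and friends are placeholders, not the driver's structs):

#include <stdio.h>

struct sketch_pll;
typedef void (*pll_op)(struct sketch_pll *);

struct sketch_pll {
	int id;
	const char *name;
	pll_op enable, disable;
};

static void sketch_enable(struct sketch_pll *p)  { printf("enable %s\n", p->name); }
static void sketch_disable(struct sketch_pll *p) { printf("disable %s\n", p->name); }

int main(void)
{
	static const char * const names[] = { "WRPLL 1", "WRPLL 2" };
	struct sketch_pll plls[2];
	int i;

	for (i = 0; i < 2; i++) {	/* same shape as the init loop above */
		plls[i].id = i;
		plls[i].name = names[i];
		plls[i].enable = sketch_enable;
		plls[i].disable = sketch_disable;
	}
	plls[0].enable(&plls[0]);
	return 0;
}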
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
@@ -1465,10 +1307,15 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
intel_wait_ddi_buf_idle(dev_priv, port);
}
- val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
+ val = DP_TP_CTL_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
- if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
- val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ if (intel_dp->is_mst)
+ val |= DP_TP_CTL_MODE_MST;
+ else {
+ val |= DP_TP_CTL_MODE_SST;
+ if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+ val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
+ }
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
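A sketch of the DP_TP_CTL mode selection above: in MST mode the SST enhanced-framing bit is never set. The bit positions below are placeholders, not the real register layout:

#include <stdint.h>
#include <stdio.h>

#define TP_ENABLE		(1u << 31)	/* placeholder bits */
#define TP_MODE_MST		(1u << 27)
#define TP_MODE_SST		0u
#define TP_ENHANCED_FRAME	(1u << 18)

static uint32_t dp_tp_ctl(int is_mst, int enhanced_frame_cap)
{
	uint32_t val = TP_ENABLE;

	if (is_mst) {
		val |= TP_MODE_MST;
	} else {
		val |= TP_MODE_SST;
		if (enhanced_frame_cap)
			val |= TP_ENHANCED_FRAME;
	}
	return val;
}

int main(void)
{
	printf("mst: 0x%08x sst: 0x%08x\n",
	       (unsigned)dp_tp_ctl(1, 1), (unsigned)dp_tp_ctl(0, 1));
	return 0;
}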
@@ -1507,11 +1354,16 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- int type = intel_encoder->type;
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&intel_encoder->base);
+ int type = intel_dig_port->base.type;
+
+ if (type != INTEL_OUTPUT_DISPLAYPORT &&
+     type != INTEL_OUTPUT_EDP &&
+     type != INTEL_OUTPUT_UNKNOWN)
+ return;
- if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
- intel_dp_check_link_status(intel_dp);
+ intel_dp_hot_plug(intel_encoder);
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -1663,15 +1515,13 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
- struct intel_connector *hdmi_connector = NULL;
- struct intel_connector *dp_connector = NULL;
bool init_hdmi, init_dp;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
dev_priv->vbt.ddi_port_info[port].supports_hdmi);
init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
if (!init_dp && !init_hdmi) {
- DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+ DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible, assuming it is\n",
port_name(port));
init_hdmi = true;
init_dp = true;
@@ -1701,20 +1551,28 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
DDI_A_4_LANES);
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+ intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_ddi_hot_plug;
- if (init_dp)
- dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
+ if (init_dp) {
+ if (!intel_ddi_init_dp_connector(intel_dig_port))
+ goto err;
+
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+ }
/* In theory we don't need the encoder->type check, but leave it just in
* case we have some really bad VBTs... */
- if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
- hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
-
- if (!dp_connector && !hdmi_connector) {
- drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
+ if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+ if (!intel_ddi_init_hdmi_connector(intel_dig_port))
+ goto err;
}
+
+ return;
+
+err:
+ drm_encoder_cleanup(encoder);
+ kfree(intel_dig_port);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0be855ddf45..99eb7cad62a8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -39,12 +39,45 @@
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
+/* Primary plane formats supported by all gen */
+#define COMMON_PRIMARY_FORMATS \
+ DRM_FORMAT_C8, \
+ DRM_FORMAT_RGB565, \
+ DRM_FORMAT_XRGB8888, \
+ DRM_FORMAT_ARGB8888
+
+/* Primary plane formats for gen <= 3 */
+static const uint32_t intel_primary_formats_gen2[] = {
+ COMMON_PRIMARY_FORMATS,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+};
+
+/* Primary plane formats for gen >= 4 */
+static const uint32_t intel_primary_formats_gen4[] = {
+ COMMON_PRIMARY_FORMATS,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+/* Cursor formats */
+static const uint32_t intel_cursor_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
- ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
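The reindented macro rounds a 64-bit division to nearest; the kernel version needs do_div(), so this plain-C sketch of the same rounding is an assumption-free stand-in:

#include <stdio.h>

#define DIV_ROUND_CLOSEST_ULL_SKETCH(ll, d) \
	(((ll) + (d) / 2) / (d))

int main(void)
{
	printf("%llu\n", DIV_ROUND_CLOSEST_ULL_SKETCH(7ULL, 2ULL));	/* 4 */
	printf("%llu\n", DIV_ROUND_CLOSEST_ULL_SKETCH(5ULL, 2ULL));	/* 3 */
	return 0;
}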
-static void intel_increase_pllclock(struct drm_crtc *crtc);
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -68,6 +101,14 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
+static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
+{
+ if (!connector->mst_port)
+ return connector->encoder;
+ else
+ return &connector->mst_port->mst_encoders[pipe]->base;
+}
+
typedef struct {
int min, max;
} intel_range_t;
@@ -1061,11 +1102,6 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (HAS_PCH_LPT(dev_priv->dev)) {
- DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
- return;
- }
-
if (WARN (!pll,
"asserting DPLL %s with no DPLL\n", state_string(state)))
return;
@@ -1481,9 +1517,6 @@ static void intel_reset_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_VALLEYVIEW(dev))
- return;
-
if (IS_CHERRYVIEW(dev)) {
enum dpio_phy phy;
u32 val;
@@ -1505,26 +1538,6 @@ static void intel_reset_dpio(struct drm_device *dev)
I915_WRITE(DISPLAY_PHY_CONTROL,
PHY_COM_LANE_RESET_DEASSERT(phy, val));
}
-
- } else {
- /*
- * If DPIO has already been reset, e.g. by BIOS, just skip all
- * this.
- */
- if (I915_READ(DPIO_CTL) & DPIO_CMNRST)
- return;
-
- /*
- * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
- * Need to assert and de-assert PHY SB reset by gating the
- * common lane power, then un-gating it.
- * Simply ungating isn't enough to reset the PHY enough to get
- * ports and lanes running.
- */
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- false);
- __vlv_set_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC,
- true);
}
}
@@ -1712,6 +1725,17 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
val &= ~DPIO_DCLKP_EN;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+ /* disable left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -1749,6 +1773,9 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
+ if (WARN_ON(pll == NULL))
+ return;
+
WARN_ON(!pll->refcount);
if (pll->active == 0) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
@@ -1790,12 +1817,14 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
}
WARN_ON(pll->on);
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
+
DRM_DEBUG_KMS("enabling %s\n", pll->name);
pll->enable(dev_priv, pll);
pll->on = true;
}
-static void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1826,6 +1855,8 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
DRM_DEBUG_KMS("disabling %s\n", pll->name);
pll->disable(dev_priv, pll);
pll->on = false;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -2172,6 +2203,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
u32 alignment;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
@@ -2228,6 +2261,8 @@ err_interruptible:
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
+ WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
+
i915_gem_object_unpin_fence(obj);
i915_gem_object_unpin_from_display_plane(obj);
}
@@ -2314,6 +2349,7 @@ static bool intel_alloc_plane_obj(struct intel_crtc *crtc,
goto out_unref_obj;
}
+ obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("plane fb obj %p\n", obj);
@@ -2331,7 +2367,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_crtc *c;
struct intel_crtc *i;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
if (!intel_crtc->base.primary->fb)
return;
@@ -2352,13 +2388,17 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (c == &intel_crtc->base)
continue;
- if (!i->active || !c->primary->fb)
+ if (!i->active)
+ continue;
+
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (i915_gem_obj_ggtt_offset(fb->obj) == plane_config->base) {
+ if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
+ obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
}
@@ -2371,16 +2411,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2461,16 +2497,12 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
-
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
@@ -2546,7 +2578,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
dev_priv->display.update_primary_plane(crtc, fb, x, y);
@@ -2601,7 +2633,7 @@ void intel_display_handle_reset(struct drm_device *dev)
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
- struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
@@ -2647,7 +2679,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_framebuffer *old_fb;
+ enum pipe pipe = intel_crtc->pipe;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
int ret;
if (intel_crtc_has_pending_flip(crtc)) {
@@ -2669,9 +2704,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
- NULL);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret == 0)
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
@@ -2711,7 +2747,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dev_priv->display.update_primary_plane(crtc, fb, x, y);
- old_fb = crtc->primary->fb;
+ if (intel_crtc->active)
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
crtc->primary->fb = fb;
crtc->x = x;
crtc->y = y;
@@ -2720,13 +2758,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (intel_crtc->active && old_fb != fb)
intel_wait_for_vblank(dev, intel_crtc->pipe);
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
mutex_unlock(&dev->struct_mutex);
}
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -3587,7 +3624,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
-static void intel_put_shared_dpll(struct intel_crtc *crtc)
+void intel_put_shared_dpll(struct intel_crtc *crtc)
{
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3607,7 +3644,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3818,7 +3855,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
}
/* use legacy palette for Ironlake */
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
palreg = LGC_PALETTE(pipe);
/* Workaround : Do not read or write the pipe palette/gamma data while
@@ -3860,30 +3897,6 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
-/**
- * i9xx_fixup_plane - ugly workaround for G45 to fire up the hardware
- * cursor plane briefly if not already running after enabling the display
- * plane.
- * This workaround avoids occasional blank screens when self refresh is
- * enabled.
- */
-static void
-g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- u32 cntl = I915_READ(CURCNTR(pipe));
-
- if ((cntl & CURSOR_MODE) == 0) {
- u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
-
- I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
- I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
- intel_wait_for_vblank(dev_priv->dev, pipe);
- I915_WRITE(CURCNTR(pipe), cntl);
- I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
- I915_WRITE(FW_BLC_SELF, fw_bcl_self);
- }
-}
-
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -3892,11 +3905,10 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
+ drm_vblank_on(dev, pipe);
+
intel_enable_primary_hw_plane(dev_priv, plane, pipe);
intel_enable_planes(crtc);
- /* The fixup needs to happen before cursor is enabled */
- if (IS_G4X(dev))
- g4x_fixup_plane(dev_priv, pipe);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
@@ -3904,8 +3916,14 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip from a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
static void intel_crtc_disable_planes(struct drm_crtc *crtc)
@@ -3917,7 +3935,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
int plane = intel_crtc->plane;
intel_crtc_wait_for_pending_flips(crtc);
- drm_crtc_vblank_off(crtc);
if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
@@ -3928,6 +3945,15 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+
+ /*
+ * FIXME: Once we grow proper nuclear flip support out of this we need
+ * to compute the mask of flip planes precisely. For the time being
+ * consider this a flip to a NULL plane.
+ */
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+
+ drm_vblank_off(dev, pipe);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4006,8 +4032,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
cpt_verify_modeset(dev, intel_crtc->pipe);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4059,6 +4083,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->active)
return;
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_enable_shared_dpll(intel_crtc);
+
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
@@ -4083,16 +4110,15 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- if (intel_crtc->config.has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
-
- if (intel_crtc->config.has_pch_encoder)
- dev_priv->display.fdi_link_train(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
+ if (intel_crtc->config.has_pch_encoder) {
+ intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
+ dev_priv->display.fdi_link_train(crtc);
+ }
+
intel_ddi_enable_pipe_clock(intel_crtc);
ironlake_pfit_enable(intel_crtc);
@@ -4112,6 +4138,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config.has_pch_encoder)
lpt_pch_enable(crtc);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, true);
+
for_each_encoder_on_crtc(dev, crtc, encoder) {
encoder->enable(encoder);
intel_opregion_notify_encoder(encoder, true);
@@ -4121,8 +4150,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
intel_crtc_enable_planes(crtc);
-
- drm_crtc_vblank_on(crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4162,6 +4189,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_pipe(dev_priv, pipe);
+ if (intel_crtc->config.dp_encoder_is_mst)
+ intel_ddi_set_vc_payload_alloc(crtc, false);
+
ironlake_pfit_disable(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4200,7 +4230,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4233,23 +4262,25 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
intel_ddi_disable_pipe_clock(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->post_disable)
- encoder->post_disable(encoder);
-
if (intel_crtc->config.has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
intel_ddi_fdi_disable(crtc);
}
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
+
intel_crtc->active = false;
intel_update_watermarks(crtc);
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
+
+ if (intel_crtc_to_shared_dpll(intel_crtc))
+ intel_disable_shared_dpll(intel_crtc);
}
static void ironlake_crtc_off(struct drm_crtc *crtc)
@@ -4258,10 +4289,6 @@ static void ironlake_crtc_off(struct drm_crtc *crtc)
intel_put_shared_dpll(intel_crtc);
}
-static void haswell_crtc_off(struct drm_crtc *crtc)
-{
- intel_ddi_put_crtc_pll(crtc);
-}
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
@@ -4287,6 +4314,23 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
+static enum intel_display_power_domain port_to_power_domain(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return POWER_DOMAIN_PORT_DDI_A_4_LANES;
+ case PORT_B:
+ return POWER_DOMAIN_PORT_DDI_B_4_LANES;
+ case PORT_C:
+ return POWER_DOMAIN_PORT_DDI_C_4_LANES;
+ case PORT_D:
+ return POWER_DOMAIN_PORT_DDI_D_4_LANES;
+ default:
+ WARN_ON_ONCE(1);
+ return POWER_DOMAIN_PORT_OTHER;
+ }
+}
+
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
if ((1 << (domain)) & (mask))
@@ -4305,19 +4349,10 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- switch (intel_dig_port->port) {
- case PORT_A:
- return POWER_DOMAIN_PORT_DDI_A_4_LANES;
- case PORT_B:
- return POWER_DOMAIN_PORT_DDI_B_4_LANES;
- case PORT_C:
- return POWER_DOMAIN_PORT_DDI_C_4_LANES;
- case PORT_D:
- return POWER_DOMAIN_PORT_DDI_D_4_LANES;
- default:
- WARN_ON_ONCE(1);
- return POWER_DOMAIN_PORT_OTHER;
- }
+ return port_to_power_domain(intel_dig_port->port);
+ case INTEL_OUTPUT_DP_MST:
+ intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
+ return port_to_power_domain(intel_dig_port->port);
case INTEL_OUTPUT_ANALOG:
return POWER_DOMAIN_PORT_CRT;
case INTEL_OUTPUT_DSI:
@@ -4333,7 +4368,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- bool pfit_enabled = intel_crtc->config.pch_pfit.enabled;
unsigned long mask;
enum transcoder transcoder;
@@ -4341,7 +4375,8 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
mask = BIT(POWER_DOMAIN_PIPE(pipe));
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
- if (pfit_enabled)
+ if (intel_crtc->config.pch_pfit.enabled ||
+ intel_crtc->config.pch_pfit.force_thru)
mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
@@ -4398,7 +4433,8 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
}
-int valleyview_get_vco(struct drm_i915_private *dev_priv)
+/* returns HPLL frequency in kHz */
+static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -4408,7 +4444,23 @@ int valleyview_get_vco(struct drm_i915_private *dev_priv)
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->dpio_lock);
- return vco_freq[hpll_freq];
+ return vco_freq[hpll_freq] * 1000;
+}
+
+static void vlv_update_cdclk(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
+ dev_priv->vlv_cdclk_freq);
+
+ /*
+ * Program the gmbus_freq based on the cdclk frequency.
+ * BSpec erroneously claims we should aim for 4MHz, but
+ * in fact 1MHz is the correct frequency.
+ */
+ I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
}
/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -4417,12 +4469,11 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val, cmd;
- WARN_ON(valleyview_cur_cdclk(dev_priv) != dev_priv->vlv_cdclk_freq);
- dev_priv->vlv_cdclk_freq = cdclk;
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
- if (cdclk >= 320) /* jump to highest voltage for 400MHz too */
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
cmd = 2;
- else if (cdclk == 266)
+ else if (cdclk == 266667)
cmd = 1;
else
cmd = 0;
@@ -4439,18 +4490,23 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
}
mutex_unlock(&dev_priv->rps.hw_lock);
- if (cdclk == 400) {
+ if (cdclk == 400000) {
u32 divider, vco;
vco = valleyview_get_vco(dev_priv);
- divider = ((vco << 1) / cdclk) - 1;
+ divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
mutex_lock(&dev_priv->dpio_lock);
/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- val &= ~0xf;
+ val &= ~DISPLAY_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+ if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ 50))
+ DRM_ERROR("timed out waiting for CDclk change\n");
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -4463,54 +4519,43 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
* For high bandwidth configs, we set a higher latency in the bunit
* so that the core display fetch happens in time to avoid underruns.
*/
- if (cdclk == 400)
+ if (cdclk == 400000)
val |= 4500 / 250; /* 4.5 usec */
else
val |= 3000 / 250; /* 3.0 usec */
vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
mutex_unlock(&dev_priv->dpio_lock);
- /* Since we changed the CDclk, we need to update the GMBUSFREQ too */
- intel_i2c_reset(dev);
-}
-
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv)
-{
- int cur_cdclk, vco;
- int divider;
-
- vco = valleyview_get_vco(dev_priv);
-
- mutex_lock(&dev_priv->dpio_lock);
- divider = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- mutex_unlock(&dev_priv->dpio_lock);
-
- divider &= 0xf;
-
- cur_cdclk = (vco << 1) / (divider + 1);
-
- return cur_cdclk;
+ vlv_update_cdclk(dev);
}
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
+ int vco = valleyview_get_vco(dev_priv);
+ int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
* 267MHz
- * 320MHz
+ * 320/333MHz (depends on HPLL freq)
* 400MHz
* So we check to see whether we're above 90% of the lower bin and
* adjust if needed.
+ *
+ * We seem to get an unstable or solid color picture at 200MHz.
+ * Not sure what's wrong. For now use 200MHz only when all pipes
+ * are off.
*/
- if (max_pixclk > 288000) {
- return 400;
- } else if (max_pixclk > 240000) {
- return 320;
- } else
- return 266;
- /* Looks like the 200MHz CDclk freq doesn't work on some configs */
+ if (max_pixclk > freq_320 * 9 / 10)
+ return 400000;
+ else if (max_pixclk > 266667 * 9 / 10)
+ return freq_320;
+ else if (max_pixclk > 0)
+ return 266667;
+ else
+ return 200000;
}
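valleyview_calc_cdclk() now works in kHz and picks the lowest bin whose 90% point still covers the pixel clock; the 320 vs 333 MHz choice depends on whether twice the HPLL vco divides 320 MHz evenly. A sketch with vco in kHz, as valleyview_get_vco() returns after this patch (the sample inputs are made up):

#include <stdio.h>

static int sketch_calc_cdclk(int vco_khz, int max_pixclk)
{
	int freq_320 = (vco_khz << 1) % 320000 != 0 ? 333333 : 320000;

	if (max_pixclk > freq_320 * 9 / 10)
		return 400000;
	else if (max_pixclk > 266667 * 9 / 10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;	/* only safe with all pipes off */
}

int main(void)
{
	printf("%d\n", sketch_calc_cdclk(800000, 250000));	/* 320000 */
	return 0;
}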
/* compute the max pixel clock for new configuration */
@@ -4633,8 +4678,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc_enable_planes(crtc);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4727,8 +4770,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
- drm_crtc_vblank_on(crtc);
-
/* Underruns don't raise interrupts, so check manually. */
i9xx_check_fifo_underruns(dev);
}
@@ -4768,6 +4809,16 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ intel_set_memory_cxsr(dev_priv, false);
intel_crtc_disable_planes(crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4776,9 +4827,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
/*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
+ * We also need to wait on all gmch platforms because of the
+ * self-refresh mode constraint explained above.
*/
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
+ intel_wait_for_vblank(dev, pipe);
intel_disable_pipe(dev_priv, pipe);
@@ -4805,7 +4857,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
- intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
}
@@ -4843,23 +4894,49 @@ static void intel_crtc_update_sarea(struct drm_crtc *crtc,
}
}
+/* Master function to enable/disable CRTC and corresponding power wells */
+void intel_crtc_control(struct drm_crtc *crtc, bool enable)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain domain;
+ unsigned long domains;
+
+ if (enable) {
+ if (!intel_crtc->active) {
+ domains = get_crtc_power_domains(crtc);
+ for_each_power_domain(domain, domains)
+ intel_display_power_get(dev_priv, domain);
+ intel_crtc->enabled_power_domains = domains;
+
+ dev_priv->display.crtc_enable(crtc);
+ }
+ } else {
+ if (intel_crtc->active) {
+ dev_priv->display.crtc_disable(crtc);
+
+ domains = intel_crtc->enabled_power_domains;
+ for_each_power_domain(domain, domains)
+ intel_display_power_put(dev_priv, domain);
+ intel_crtc->enabled_power_domains = 0;
+ }
+ }
+}
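intel_crtc_control() brackets crtc enable/disable with power-domain get/put and remembers which domains it took so the put side releases exactly the same set. A toy model of that bookkeeping (domains are just bit indices here):

#include <stdio.h>

static int domain_refs[8];

static void power_get(int d) { domain_refs[d]++; }
static void power_put(int d) { domain_refs[d]--; }

static void crtc_control(unsigned long *enabled_domains,
			 unsigned long needed, int enable)
{
	int d;

	if (enable) {
		for (d = 0; d < 8; d++)
			if (needed & (1ul << d))
				power_get(d);
		*enabled_domains = needed;
		/* ...crtc_enable() would run here... */
	} else {
		/* ...crtc_disable() would run here... */
		for (d = 0; d < 8; d++)
			if (*enabled_domains & (1ul << d))
				power_put(d);
		*enabled_domains = 0;
	}
}

int main(void)
{
	unsigned long domains = 0;

	crtc_control(&domains, 0x5, 1);
	crtc_control(&domains, 0x5, 0);
	printf("refs balanced: %s\n",
	       (domain_refs[0] == 0 && domain_refs[2] == 0) ? "yes" : "no");
	return 0;
}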
+
/**
* Sets the power management mode of the pipe and plane.
*/
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
bool enable = false;
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
enable |= intel_encoder->connectors_active;
- if (enable)
- dev_priv->display.crtc_enable(crtc);
- else
- dev_priv->display.crtc_disable(crtc);
+ intel_crtc_control(crtc, enable);
intel_crtc_update_sarea(crtc, enable);
}
@@ -4869,6 +4946,8 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(crtc->primary->fb);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
@@ -4877,13 +4956,11 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
- assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
- assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
- assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
-
if (crtc->primary->fb) {
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(to_intel_framebuffer(crtc->primary->fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, NULL,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = NULL;
}
@@ -4939,24 +5016,31 @@ static void intel_connector_check_state(struct intel_connector *connector)
connector->base.base.id,
connector->base.name);
+ /* there is no real hw state for MST connectors */
+ if (connector->mst_port)
+ return;
+
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
- WARN(!encoder->connectors_active,
- "encoder->connectors_active not set\n");
- encoder_enabled = encoder->get_hw_state(encoder, &pipe);
- WARN(!encoder_enabled, "encoder not enabled\n");
- if (WARN_ON(!encoder->base.crtc))
- return;
+ if (encoder) {
+ WARN(!encoder->connectors_active,
+ "encoder->connectors_active not set\n");
+
+ encoder_enabled = encoder->get_hw_state(encoder, &pipe);
+ WARN(!encoder_enabled, "encoder not enabled\n");
+ if (WARN_ON(!encoder->base.crtc))
+ return;
- crtc = encoder->base.crtc;
+ crtc = encoder->base.crtc;
- WARN(!crtc->enabled, "crtc not enabled\n");
- WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
- WARN(pipe != to_intel_crtc(crtc)->pipe,
- "encoder active on the wrong pipe\n");
+ WARN(!crtc->enabled, "crtc not enabled\n");
+ WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
+ WARN(pipe != to_intel_crtc(crtc)->pipe,
+ "encoder active on the wrong pipe\n");
+ }
}
}
@@ -5161,9 +5245,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
if (HAS_IPS(dev))
hsw_compute_ips_config(crtc, pipe_config);
- /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
- * clock survives for now. */
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ /*
+ * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
+ * old clock survives for now.
+ */
+ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
pipe_config->shared_dpll = crtc->config.shared_dpll;
if (pipe_config->has_pch_encoder)
@@ -5174,7 +5260,22 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
- return 400000; /* FIXME */
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int vco = valleyview_get_vco(dev_priv);
+ u32 val;
+ int divider;
+
+ mutex_lock(&dev_priv->dpio_lock);
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ divider = val & DISPLAY_FREQUENCY_VALUES;
+
+ WARN((val & DISPLAY_FREQUENCY_STATUS) !=
+ (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
+ "cdclk change in progress\n");
+
+ return DIV_ROUND_CLOSEST(vco << 1, divider + 1);
}
static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -6125,8 +6226,8 @@ static void i9xx_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7145,8 +7246,8 @@ static void ironlake_get_plane_config(struct intel_crtc *crtc,
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
- plane_config->size = ALIGN(crtc->base.primary->fb->pitches[0] *
- aligned_height, PAGE_SIZE);
+ plane_config->size = PAGE_ALIGN(crtc->base.primary->fb->pitches[0] *
+ aligned_height);
DRM_DEBUG_KMS("pipe/plane %d/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe, plane, crtc->base.primary->fb->width,
@@ -7163,6 +7264,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
+ if (!intel_display_power_enabled(dev_priv,
+ POWER_DOMAIN_PIPE(crtc->pipe)))
+ return false;
+
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -7237,7 +7342,6 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *crtc;
for_each_intel_crtc(dev, crtc)
@@ -7245,14 +7349,15 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
- WARN(plls->spll_refcount, "SPLL enabled\n");
- WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
- WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
+ WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
+ WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
- WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
- "CPU PWM2 enabled\n");
+ if (IS_HASWELL(dev))
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
@@ -7265,7 +7370,17 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
* gen-specific and since we only disable LCPLL after we fully disable
* the interrupts, the check below should be enough.
*/
- WARN(!dev_priv->pm.irqs_disabled, "IRQs enabled\n");
+ WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
+}
+
+static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ if (IS_HASWELL(dev))
+ return I915_READ(D_COMP_HSW);
+ else
+ return I915_READ(D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
@@ -7276,12 +7391,12 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
mutex_lock(&dev_priv->rps.hw_lock);
if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
val))
- DRM_ERROR("Failed to disable D_COMP\n");
+ DRM_ERROR("Failed to write to D_COMP\n");
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
- I915_WRITE(D_COMP, val);
+ I915_WRITE(D_COMP_BDW, val);
+ POSTING_READ(D_COMP_BDW);
}
- POSTING_READ(D_COMP);
}
/*
@@ -7319,12 +7434,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
DRM_ERROR("LCPLL still locked\n");
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
ndelay(100);
- if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 1))
DRM_ERROR("D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -7373,7 +7489,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
POSTING_READ(LCPLL_CTL);
}
- val = I915_READ(D_COMP);
+ val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_FORCE;
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
@@ -7479,13 +7595,59 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
if (!intel_ddi_pll_select(intel_crtc))
return -EINVAL;
- intel_ddi_pll_enable(intel_crtc);
intel_crtc->lowfreq_avail = false;
return 0;
}
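+/*
+ * Read out which DDI port drives the transcoder and which PLL feeds that
+ * port, and detect whether the pipe is routed through the FDI/PCH
+ * transcoder.
+ */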
+static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_shared_dpll *pll;
+ enum port port;
+ uint32_t tmp;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+
+ pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+ switch (pipe_config->ddi_pll_sel) {
+ case PORT_CLK_SEL_WRPLL1:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ break;
+ }
+
+ if (pipe_config->shared_dpll >= 0) {
+ pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
+
+ WARN_ON(!pll->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
+ }
+
+ /*
+ * Haswell has only FDI/PCH transcoder A, which is connected to
+ * DDI E. So just check whether this pipe is wired to DDI E and whether
+ * the PCH transcoder is on.
+ */
+ if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ pipe_config->has_pch_encoder = true;
+
+ tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
+ FDI_DP_PORT_WIDTH_SHIFT) + 1;
+
+ ironlake_get_fdi_m_n_config(crtc, pipe_config);
+ }
+}
+
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
@@ -7531,22 +7693,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!(tmp & PIPECONF_ENABLE))
return false;
- /*
- * Haswell has only FDI/PCH transcoder A. It is which is connected to
- * DDI E. So just check whether this pipe is wired to DDI E and whether
- * the PCH transcoder is on.
- */
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
- I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
- pipe_config->has_pch_encoder = true;
-
- tmp = I915_READ(FDI_RX_CTL(PIPE_A));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ironlake_get_fdi_m_n_config(crtc, pipe_config);
- }
+ haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
@@ -7991,8 +8138,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int x = intel_crtc->cursor_x;
- int y = intel_crtc->cursor_y;
+ int x = crtc->cursor_x;
+ int y = crtc->cursor_y;
u32 base = 0, pos = 0;
if (on)
@@ -8036,21 +8183,27 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
intel_crtc->cursor_base = base;
}
-static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file,
- uint32_t handle,
- uint32_t width, uint32_t height)
+/*
+ * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
+ *
+ * Note that the object's reference will be consumed if the update fails. If
+ * the update succeeds, the reference of the old object (if any) will be
+ * consumed.
+ */
+static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
+ struct drm_i915_gem_object *obj,
+ uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj;
+ enum pipe pipe = intel_crtc->pipe;
unsigned old_width;
uint32_t addr;
int ret;
/* if we want to turn off the cursor ignore width and height */
- if (!handle) {
+ if (!obj) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
@@ -8066,12 +8219,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
- if (&obj->base == NULL)
- return -ENOENT;
-
if (obj->base.size < width * height * 4) {
- DRM_DEBUG_KMS("buffer is to small\n");
+ DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
}
@@ -8126,9 +8275,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
- drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
+ i915_gem_track_fb(intel_crtc->cursor_bo, obj,
+ INTEL_FRONTBUFFER_CURSOR(pipe));
mutex_unlock(&dev->struct_mutex);
old_width = intel_crtc->cursor_width;
@@ -8144,6 +8294,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
}
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
+
return 0;
fail_unpin:
i915_gem_object_unpin_from_display_plane(obj);
@@ -8154,19 +8306,6 @@ fail:
return ret;
}
-static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- intel_crtc->cursor_x = clamp_t(int, x, SHRT_MIN, SHRT_MAX);
- intel_crtc->cursor_y = clamp_t(int, y, SHRT_MIN, SHRT_MAX);
-
- if (intel_crtc->active)
- intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
-
- return 0;
-}
-
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t start, uint32_t size)
{
@@ -8242,7 +8381,7 @@ static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
- return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
+ return PAGE_ALIGN(pitch * mode->vdisplay);
}
static struct drm_framebuffer *
@@ -8667,16 +8806,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void intel_increase_pllclock(struct drm_crtc *crtc)
+static void intel_increase_pllclock(struct drm_device *dev,
+ enum pipe pipe)
{
- struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll;
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8704,7 +8841,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (HAS_PCH_SPLIT(dev))
+ if (!HAS_GMCH_DISPLAY(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -8773,28 +8910,179 @@ out:
intel_runtime_pm_put(dev_priv);
}
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+
+/**
+ * intel_mark_fb_busy - mark given planes as busy
+ * @dev: DRM device
+ * @frontbuffer_bits: bits for the affected planes
+ * @ring: optional ring for asynchronous commands
+ *
+ * This function gets called every time the screen contents change. It can be
+ * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
+ */
+static void intel_mark_fb_busy(struct drm_device *dev,
+ unsigned frontbuffer_bits,
+ struct intel_engine_cs *ring)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_crtc *crtc;
+ enum pipe pipe;
if (!i915.powersave)
return;
- for_each_crtc(dev, crtc) {
- if (!crtc->primary->fb)
- continue;
-
- if (to_intel_framebuffer(crtc->primary->fb)->obj != obj)
+ for_each_pipe(pipe) {
+ if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
continue;
- intel_increase_pllclock(crtc);
+ intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @ring: set for asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. If @ring is non-NULL, any subsequent flushes will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ if (ring) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.busy_bits
+ |= obj->frontbuffer_bits;
+ dev_priv->fb_tracking.flip_bits
+ &= ~obj->frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
+
+ intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flush - flush frontbuffer
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed and frontbuffer caching can be started again. Flushes will get
+ * delayed if they're blocked by some outstanding asynchronous rendering.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Delay flushing when rings are still busy. */
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
+
+ intel_edp_psr_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+ bool retire)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned frontbuffer_bits;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!obj->frontbuffer_bits)
+ return;
+
+ frontbuffer_bits = obj->frontbuffer_bits;
+
+ if (retire) {
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Filter out new bits since rendering started. */
+ frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
+
+ dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+ }
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
+/**
+ * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the given planes. The
+ * actual frontbuffer flushing will be delayed until completion is signalled
+ * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
+ * this flush will be cancelled.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ dev_priv->fb_tracking.flip_bits
+ |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after the flip has been latched and will complete
+ * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
+ *
+ * Can be called without any locks held.
+ */
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ mutex_lock(&dev_priv->fb_tracking.lock);
+ /* Mask any cancelled flips. */
+ frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
+ dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
+ mutex_unlock(&dev_priv->fb_tracking.lock);
+
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
+
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -8812,8 +9100,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
- intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
-
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -8824,6 +9110,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
+ enum pipe pipe = to_intel_crtc(work->crtc)->pipe;
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
@@ -8833,6 +9120,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
+ intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
+
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
@@ -9202,6 +9491,150 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
return 0;
}
+static bool use_mmio_flip(struct intel_engine_cs *ring,
+ struct drm_i915_gem_object *obj)
+{
+ /*
+ * MMIO flips are not used on older platforms: the lack of a flip-done
+ * interrupt forces us to use CS flips there. Those platforms derive
+ * flip completion with clever tricks involving the flip_pending status
+ * bits and vblank irqs, which MMIO flips would disrupt.
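+ *
+ * Absent an explicit i915.use_mmio_flip override, MMIO flips are
+ * preferred whenever the object was last written on a different ring,
+ * so the flip does not have to wait behind unrelated rendering.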
+ */
+
+ if (ring == NULL)
+ return true;
+
+ if (INTEL_INFO(ring->dev)->gen < 5)
+ return false;
+
+ if (i915.use_mmio_flip < 0)
+ return false;
+ else if (i915.use_mmio_flip > 0)
+ return true;
+ else
+ return ring != obj->ring;
+}
+
+static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
+{
+ struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_framebuffer *intel_fb =
+ to_intel_framebuffer(intel_crtc->base.primary->fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ u32 dspcntr;
+ u32 reg;
+
+ intel_mark_page_flip_active(intel_crtc);
+
+ reg = DSPCNTR(intel_crtc->plane);
+ dspcntr = I915_READ(reg);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+ I915_WRITE(reg, dspcntr);
+
+ I915_WRITE(DSPSURF(intel_crtc->plane),
+ intel_crtc->unpin_work->gtt_offset);
+ POSTING_READ(DSPSURF(intel_crtc->plane));
+}
+
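+/*
+ * Returns 0 if the flip can be executed immediately, 1 if it has to be
+ * postponed until the object's last write completes (in which case a
+ * reference on the ring's user interrupt is held), or a negative error
+ * code from the outstanding-lazy-request check.
+ */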
+static int intel_postpone_flip(struct drm_i915_gem_object *obj)
+{
+ struct intel_engine_cs *ring;
+ int ret;
+
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ if (!obj->last_write_seqno)
+ return 0;
+
+ ring = obj->ring;
+
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ obj->last_write_seqno))
+ return 0;
+
+ ret = i915_gem_check_olr(ring, obj->last_write_seqno);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(!ring->irq_get(ring)))
+ return 0;
+
+ return 1;
+}
+
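+/*
+ * Walks all crtcs and completes any pending MMIO flip whose target
+ * seqno has passed on @ring, dropping the irq reference taken in
+ * intel_postpone_flip(). Intended to be driven from the ring seqno
+ * interrupt; intel_queue_mmio_flip() also calls it directly as a
+ * double check.
+ */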
+void intel_notify_mmio_flip(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
+ struct intel_crtc *intel_crtc;
+ unsigned long irq_flags;
+ u32 seqno;
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ for_each_intel_crtc(ring->dev, intel_crtc) {
+ struct intel_mmio_flip *mmio_flip;
+
+ mmio_flip = &intel_crtc->mmio_flip;
+ if (mmio_flip->seqno == 0)
+ continue;
+
+ if (ring->id != mmio_flip->ring_id)
+ continue;
+
+ if (i915_seqno_passed(seqno, mmio_flip->seqno)) {
+ intel_do_mmio_flip(intel_crtc);
+ mmio_flip->seqno = 0;
+ ring->irq_put(ring);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+}
+
+static int intel_queue_mmio_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring,
+ uint32_t flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long irq_flags;
+ int ret;
+
+ if (WARN_ON(intel_crtc->mmio_flip.seqno))
+ return -EBUSY;
+
+ ret = intel_postpone_flip(obj);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
+ intel_do_mmio_flip(intel_crtc);
+ return 0;
+ }
+
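+ /*
+ * Stash the target seqno and ring; intel_notify_mmio_flip() will
+ * consume them under the same lock.
+ */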
+ spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
+ intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
+ intel_crtc->mmio_flip.ring_id = obj->ring->id;
+ spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
+
+ /*
+ * Double check to catch cases where irq fired before
+ * mmio flip data was ready
+ */
+ intel_notify_mmio_flip(obj->ring);
+ return 0;
+}
+
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -9220,13 +9653,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_unpin_work *work;
struct intel_engine_cs *ring;
unsigned long flags;
int ret;
+ /*
+ * drm_mode_page_flip_ioctl() should already catch this, but double
+ * check to be safe. In the future we may enable pageflipping from
+ * a disabled primary plane.
+ */
+ if (WARN_ON(intel_fb_obj(old_fb) == NULL))
+ return -EBUSY;
+
/* Can't change pixel format via MI display flips. */
if (fb->pixel_format != crtc->primary->fb->pixel_format)
return -EINVAL;
@@ -9249,7 +9691,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->crtc = crtc;
- work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
+ work->old_fb_obj = intel_fb_obj(old_fb);
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_crtc_vblank_get(crtc);
@@ -9290,10 +9732,15 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(intel_crtc->pipe)) + 1;
+ work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;
if (IS_VALLEYVIEW(dev)) {
ring = &dev_priv->ring[BCS];
+ if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
+ /* vlv: DISPLAY_FLIP fails to change tiling */
+ ring = NULL;
+ } else if (IS_IVYBRIDGE(dev)) {
+ ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
if (ring == NULL || ring->id != RCS)
@@ -9309,12 +9756,20 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->gtt_offset =
i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);
+ if (use_mmio_flip(ring, obj))
+ ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
+ else
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
+ page_flip_flags);
if (ret)
goto cleanup_unpin;
+ i915_gem_track_fb(work->old_fb_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(pipe));
+
intel_disable_fbc(dev);
- intel_mark_fb_busy(obj, NULL);
+ intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
mutex_unlock(&dev->struct_mutex);
trace_i915_flip_request(intel_crtc->plane, obj);
@@ -9344,7 +9799,7 @@ out_hang:
intel_crtc_wait_for_pending_flips(crtc);
ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
if (ret == 0 && event)
- drm_send_vblank_event(dev, intel_crtc->pipe, event);
+ drm_send_vblank_event(dev, pipe, event);
}
return ret;
}
@@ -10017,11 +10472,14 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(double_wide);
+ PIPE_CONF_CHECK_X(ddi_pll_sel);
+
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
+ PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10083,6 +10541,14 @@ check_encoder_state(struct drm_device *dev)
if (connector->base.dpms != DRM_MODE_DPMS_OFF)
active = true;
}
+ /*
+ * for MST connectors if we unplug the connector is gone
+ * away but the encoder is still connected to a crtc
+ * until a modeset happens in response to the hotplug.
+ */
+ if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue;
+
WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
@@ -10378,20 +10844,23 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_framebuffer *old_fb;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
- to_intel_framebuffer(fb)->obj,
+ obj,
NULL);
if (ret != 0) {
DRM_ERROR("pin & fence failed\n");
mutex_unlock(&dev->struct_mutex);
goto done;
}
- old_fb = crtc->primary->fb;
if (old_fb)
- intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
+ intel_unpin_fb_obj(old_obj);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
mutex_unlock(&dev->struct_mutex);
crtc->primary->fb = fb;
@@ -10563,12 +11032,17 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
if (is_crtc_connector_off(set)) {
config->mode_changed = true;
} else if (set->crtc->primary->fb != set->fb) {
- /* If we have no fb then treat it as a full mode set */
+ /*
+ * If we have no fb, we can only flip as long as the crtc is
+ * active, otherwise we need a full mode set. The crtc may
+ * be active if we've only disabled the primary plane, or
+ * in fastboot situations.
+ */
if (set->crtc->primary->fb == NULL) {
struct intel_crtc *intel_crtc =
to_intel_crtc(set->crtc);
- if (intel_crtc->active && i915.fastboot) {
+ if (intel_crtc->active) {
DRM_DEBUG_KMS("crtc has no fb, will flip\n");
config->fb_changed = true;
} else {
@@ -10620,7 +11094,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base) {
- connector->new_encoder = connector->encoder;
+ connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
break;
}
}
@@ -10666,7 +11140,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
new_crtc)) {
return -EINVAL;
}
- connector->encoder->new_crtc = to_intel_crtc(new_crtc);
+ connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
@@ -10700,7 +11174,12 @@ intel_modeset_stage_output_state(struct drm_device *dev,
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
-
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ base.head) {
+ if (connector->new_encoder &&
+ connector->new_encoder != connector->encoder)
+ connector->encoder = connector->new_encoder;
+ }
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
@@ -10806,10 +11285,24 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
+
intel_crtc_wait_for_pending_flips(set->crtc);
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
+
+ /*
+ * We need to make sure the primary plane is re-enabled if it
+ * has previously been turned off.
+ */
+ if (!intel_crtc->primary_enabled && ret == 0) {
+ WARN_ON(!intel_crtc->active);
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+ }
+
/*
* In the fastboot case this may be our only check of the
* state after boot. It would be better to only do it on
@@ -10850,26 +11343,21 @@ out_config:
}
static const struct drm_crtc_funcs intel_crtc_funcs = {
- .cursor_set = intel_crtc_cursor_set,
- .cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
.page_flip = intel_crtc_page_flip,
};
-static void intel_cpu_pll_init(struct drm_device *dev)
-{
- if (HAS_DDI(dev))
- intel_ddi_pll_init(dev);
-}
-
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
uint32_t val;
+ if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ return false;
+
val = I915_READ(PCH_DPLL(pll->id));
hw_state->dpll = val;
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
@@ -10951,7 +11439,9 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+ else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ibx_pch_dpll_init(dev);
else
dev_priv->num_shared_dpll = 0;
@@ -10959,17 +11449,328 @@ static void intel_shared_dpll_init(struct drm_device *dev)
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
+static int
+intel_primary_plane_disable(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_crtc *intel_crtc;
+
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ intel_crtc = to_intel_crtc(plane->crtc);
+
+ /*
+ * Even though we checked plane->fb above, it's still possible that
+ * the primary plane has been implicitly disabled because the crtc
+ * coordinates given weren't visible, or because we detected
+ * that it was 100% covered by a sprite plane. Or, the CRTC may be
+ * off and we've set a fb, but haven't actually turned on the CRTC yet.
+ * In any of these cases, we need to unpin the FB and let the fb pointer get
+ * updated, but otherwise we don't need to touch the hardware.
+ */
+ if (!intel_crtc->primary_enabled)
+ goto disable_unpin;
+
+ intel_crtc_wait_for_pending_flips(plane->crtc);
+ intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
+ intel_plane->pipe);
+disable_unpin:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+ intel_unpin_fb_obj(intel_fb_obj(plane->fb));
+ mutex_unlock(&dev->struct_mutex);
+ plane->fb = NULL;
+
+ return 0;
+}
+
+static int
+intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true, &visible);
+
+ if (ret)
+ return ret;
+
+ /*
+ * If the CRTC isn't enabled, we're just pinning the framebuffer,
+ * updating the fb pointer, and returning without touching the
+ * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
+ * turn on the display with all planes setup as desired.
+ */
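+ /*
+ * The field encodes the divisor minus one:
+ * cdclk = 2 * vco / (divider + 1)
+ */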
+ if (!crtc->enabled) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * If we already called setplane while the crtc was disabled,
+ * we may have an fb pinned; unpin it.
+ */
+ if (plane->fb)
+ intel_unpin_fb_obj(old_obj);
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ /* Pin and return without programming hardware */
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+ }
+
+ intel_crtc_wait_for_pending_flips(crtc);
+
+ /*
+ * If clipping results in a non-visible primary plane, we'll disable
+ * the primary plane. Note that this is a bit different than what
+ * happens if userspace explicitly disables the plane by passing fb=0
+ * because plane->fb still gets set and pinned.
+ */
+ if (!visible) {
+ mutex_lock(&dev->struct_mutex);
+
+ /*
+ * Try to pin the new fb first so that we can bail out if we
+ * fail.
+ */
+ if (plane->fb != fb) {
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+ }
+
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
+
+ if (intel_crtc->primary_enabled)
+ intel_disable_primary_hw_plane(dev_priv,
+ intel_plane->plane,
+ intel_plane->pipe);
+
+ if (plane->fb && plane->fb != fb)
+ intel_unpin_fb_obj(old_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
+
+ ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
+ if (ret)
+ return ret;
+
+ if (!intel_crtc->primary_enabled)
+ intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
+ intel_crtc->pipe);
+
+ return 0;
+}
+
+/* Common destruction function for both primary and cursor planes */
+static void intel_plane_destroy(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+static const struct drm_plane_funcs intel_primary_plane_funcs = {
+ .update_plane = intel_primary_plane_setplane,
+ .disable_plane = intel_primary_plane_disable,
+ .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *primary;
+ const uint32_t *intel_primary_formats;
+ int num_formats;
+
+ primary = kzalloc(sizeof(*primary), GFP_KERNEL);
+ if (primary == NULL)
+ return NULL;
+
+ primary->can_scale = false;
+ primary->max_downscale = 1;
+ primary->pipe = pipe;
+ primary->plane = pipe;
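+ /*
+ * On gen2/3 only plane A can do FBC, so primary planes are
+ * cross-wired to pipes there (matching intel_crtc_init()).
+ */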
+ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
+ primary->plane = !pipe;
+
+ if (INTEL_INFO(dev)->gen <= 3) {
+ intel_primary_formats = intel_primary_formats_gen2;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
+ } else {
+ intel_primary_formats = intel_primary_formats_gen4;
+ num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
+ }
+
+ drm_universal_plane_init(dev, &primary->base, 0,
+ &intel_primary_plane_funcs,
+ intel_primary_formats, num_formats,
+ DRM_PLANE_TYPE_PRIMARY);
+ return &primary->base;
+}
+
+static int
+intel_cursor_plane_disable(struct drm_plane *plane)
+{
+ if (!plane->fb)
+ return 0;
+
+ BUG_ON(!plane->crtc);
+
+ return intel_crtc_cursor_set_obj(plane->crtc, NULL, 0, 0);
+}
+
+static int
+intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_rect dest = {
+ /* integer pixels */
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ /* integer pixels */
+ .x2 = intel_crtc->config.pipe_src_w,
+ .y2 = intel_crtc->config.pipe_src_h,
+ };
+ bool visible;
+ int ret;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true, &visible);
+ if (ret)
+ return ret;
+
+ crtc->cursor_x = crtc_x;
+ crtc->cursor_y = crtc_y;
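+
+ /* A pure cursor move can skip the full set-object path. */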
+ if (fb != crtc->cursor->fb) {
+ return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
+ } else {
+ intel_crtc_update_cursor(crtc, visible);
+ return 0;
+ }
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_cursor_plane_update,
+ .disable_plane = intel_cursor_plane_disable,
+ .destroy = intel_plane_destroy,
+};
+
+static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
+ int pipe)
+{
+ struct intel_plane *cursor;
+
+ cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
+ if (cursor == NULL)
+ return NULL;
+
+ cursor->can_scale = false;
+ cursor->max_downscale = 1;
+ cursor->pipe = pipe;
+ cursor->plane = pipe;
+
+ drm_universal_plane_init(dev, &cursor->base, 0,
+ &intel_cursor_plane_funcs,
+ intel_cursor_formats,
+ ARRAY_SIZE(intel_cursor_formats),
+ DRM_PLANE_TYPE_CURSOR);
+ return &cursor->base;
+}
+
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
- int i;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int i, ret;
intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
if (intel_crtc == NULL)
return;
- drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
+ primary = intel_primary_plane_create(dev, pipe);
+ if (!primary)
+ goto fail;
+
+ cursor = intel_cursor_plane_create(dev, pipe);
+ if (!cursor)
+ goto fail;
+
+ ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
+ cursor, &intel_crtc_funcs);
+ if (ret)
+ goto fail;
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
@@ -10980,7 +11781,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
/*
* On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
- * is hooked to plane B. Hence we want plane A feeding pipe B.
+ * is hooked to pipe B. Hence we want plane A feeding pipe B.
*/
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
@@ -11002,6 +11803,14 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
+ return;
+
+fail:
+ if (primary)
+ drm_plane_cleanup(primary);
+ if (cursor)
+ drm_plane_cleanup(cursor);
+ kfree(intel_crtc);
}
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
@@ -11021,21 +11830,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
- drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
- DRM_MODE_OBJECT_CRTC);
+ drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
- if (!drmmode_obj) {
+ if (!drmmode_crtc) {
DRM_ERROR("no such CRTC id\n");
return -ENOENT;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
@@ -11236,6 +12044,8 @@ static void intel_setup_outputs(struct drm_device *dev)
if (SUPPORTS_TV(dev))
intel_tv_init(dev);
+ intel_edp_psr_init(dev);
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
@@ -11249,11 +12059,14 @@ static void intel_setup_outputs(struct drm_device *dev)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
+ struct drm_device *dev = fb->dev;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
+ mutex_lock(&dev->struct_mutex);
WARN_ON(!intel_fb->obj->framebuffer_references--);
- drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
+ drm_gem_object_unreference(&intel_fb->obj->base);
+ mutex_unlock(&dev->struct_mutex);
kfree(intel_fb);
}
@@ -11438,7 +12251,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
- dev_priv->display.off = haswell_crtc_off;
+ dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_primary_plane =
ironlake_update_primary_plane;
} else if (HAS_PCH_SPLIT(dev)) {
@@ -11722,6 +12535,9 @@ void intel_modeset_init_hw(struct drm_device *dev)
{
intel_prepare_ddi(dev);
+ if (IS_VALLEYVIEW(dev))
+ vlv_update_cdclk(dev);
+
intel_init_clock_gating(dev);
intel_reset_dpio(dev);
@@ -11798,7 +12614,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_dpio(dev);
intel_reset_dpio(dev);
- intel_cpu_pll_init(dev);
intel_shared_dpll_init(dev);
/* Just disable it once at startup */
@@ -12024,6 +12839,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
encoder->base.base.id,
encoder->base.name);
encoder->disable(encoder);
+ if (encoder->post_disable)
+ encoder->post_disable(encoder);
}
encoder->base.crtc = NULL;
encoder->connectors_active = false;
@@ -12108,10 +12925,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active ? "enabled" : "disabled");
}
- /* FIXME: Smash this into the new shared dpll infrastructure. */
- if (HAS_DDI(dev))
- intel_ddi_setup_hw_pll_state(dev);
-
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
@@ -12125,6 +12938,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
pll->name, pll->refcount, pll->on);
+
+ if (pll->refcount)
+ intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -12242,7 +13058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_crtc *c;
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
mutex_lock(&dev->struct_mutex);
intel_init_gt_powersave(dev);
@@ -12259,11 +13075,11 @@ void intel_modeset_gem_init(struct drm_device *dev)
*/
mutex_lock(&dev->struct_mutex);
for_each_crtc(dev, c) {
- if (!c->primary->fb)
+ obj = intel_fb_obj(c->primary->fb);
+ if (obj == NULL)
continue;
- fb = to_intel_framebuffer(c->primary->fb);
- if (intel_pin_and_fence_fb_obj(dev, fb->obj, NULL)) {
+ if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) {
DRM_ERROR("failed to pin boot fb on pipe %d\n",
to_intel_crtc(c)->pipe);
drm_framebuffer_unreference(c->primary->fb);
@@ -12278,13 +13094,12 @@ void intel_connector_unregister(struct intel_connector *intel_connector)
struct drm_connector *connector = &intel_connector->base;
intel_panel_destroy_backlight(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
struct drm_connector *connector;
/*
@@ -12294,6 +13109,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
*/
drm_irq_uninstall(dev);
cancel_work_sync(&dev_priv->hotplug_work);
+ dev_priv->pm._irqs_disabled = true;
+
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -12304,14 +13121,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_unregister_dsm_handler();
- for_each_crtc(dev, crtc) {
- /* Skip inactive CRTCs */
- if (!crtc->primary->fb)
- continue;
-
- intel_increase_pllclock(crtc);
- }
-
intel_disable_fbc(dev);
intel_disable_gt_powersave(dev);
@@ -12479,7 +13288,7 @@ intel_display_capture_error_state(struct drm_device *dev)
error->pipe[i].source = I915_READ(PIPESRC(i));
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8a1a4fbc06ac..eb52ecfe14cf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -114,7 +114,7 @@ static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool _edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-static int
+int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -773,12 +773,29 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
- sysfs_remove_link(&intel_connector->base.kdev->kobj,
- intel_dp->aux.ddc.dev.kobj.name);
+ if (!intel_connector->mst_port)
+ sysfs_remove_link(&intel_connector->base.kdev->kobj,
+ intel_dp->aux.ddc.dev.kobj.name);
intel_connector_unregister(intel_connector);
}
static void
+hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
+{
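+ /*
+ * HSW/BDW DP ports run off fixed LCPLL-derived clocks, so the port
+ * clock select follows directly from the negotiated link rate.
+ */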
+ switch (link_bw) {
+ case DP_LINK_BW_1_62:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
+ break;
+ case DP_LINK_BW_2_7:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
+ break;
+ case DP_LINK_BW_5_4:
+ pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
+ break;
+ }
+}
+
+static void
intel_dp_set_clock(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config, int link_bw)
{
@@ -789,8 +806,6 @@ intel_dp_set_clock(struct intel_encoder *encoder,
if (IS_G4X(dev)) {
divisor = gen4_dpll;
count = ARRAY_SIZE(gen4_dpll);
- } else if (IS_HASWELL(dev)) {
- /* Haswell has special-purpose DP DDI clocks. */
} else if (HAS_PCH_SPLIT(dev)) {
divisor = pch_dpll;
count = ARRAY_SIZE(pch_dpll);
@@ -961,7 +976,10 @@ found:
&pipe_config->dp_m2_n2);
}
- intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
+ if (HAS_DDI(dev))
+ hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
+ else
+ intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
return true;
}
@@ -1349,8 +1367,6 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- edp_wait_backlight_off(intel_dp);
-
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(intel_dp);
@@ -1386,6 +1402,9 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("\n");
+
+ intel_panel_enable_backlight(intel_dp->attached_connector);
+
/*
* If we enable the backlight right away following a panel power
* on, we may see slight flicker as the panel syncs with the eDP
@@ -1400,8 +1419,6 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
-
- intel_panel_enable_backlight(intel_dp->attached_connector);
}
void intel_edp_backlight_off(struct intel_dp *intel_dp)
@@ -1414,8 +1431,6 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
if (!is_edp(intel_dp))
return;
- intel_panel_disable_backlight(intel_dp->attached_connector);
-
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
@@ -1425,6 +1440,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp)
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
intel_dp->last_backlight_off = jiffies;
+
+ edp_wait_backlight_off(intel_dp);
+
+ intel_panel_disable_backlight(intel_dp->attached_connector);
}
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
@@ -1646,11 +1665,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
}
-static bool is_edp_psr(struct drm_device *dev)
+static bool is_edp_psr(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- return dev_priv->psr.sink_support;
+ return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}
static bool intel_edp_is_psr_enabled(struct drm_device *dev)
@@ -1698,9 +1715,6 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_vsc_psr psr_vsc;
- if (intel_dp->psr_setup_done)
- return;
-
/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
@@ -1712,22 +1726,25 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
- intel_dp->psr_setup_done = true;
}
static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
int msg_size = 5; /* Header(4) + Message(1) */
+ bool only_standby = false;
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
/* Enable PSR in sink */
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
@@ -1746,18 +1763,24 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+ bool only_standby = false;
- if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
+ only_standby = true;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
+ val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;
@@ -1775,18 +1798,15 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
- dev_priv->psr.source_ok = false;
+ lockdep_assert_held(&dev_priv->psr.lock);
+ lockdep_assert_held(&dev->struct_mutex);
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
- if (!HAS_PSR(dev)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
- return false;
- }
+ dev_priv->psr.source_ok = false;
- if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
- (dig_port->port != PORT_A)) {
+ if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}
@@ -1796,29 +1816,9 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
- crtc = dig_port->base.base.crtc;
- if (crtc == NULL) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- intel_crtc = to_intel_crtc(crtc);
- if (!intel_crtc_active(crtc)) {
- DRM_DEBUG_KMS("crtc not active for PSR\n");
- return false;
- }
-
- obj = to_intel_framebuffer(crtc->primary->fb)->obj;
- if (obj->tiling_mode != I915_TILING_X ||
- obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
- return false;
- }
-
- if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
- return false;
- }
+ /* Below limitations aren't valid for Broadwell */
+ if (IS_BROADWELL(dev))
+ goto out;
if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
@@ -1831,35 +1831,60 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ out:
dev_priv->psr.source_ok = true;
return true;
}
static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
{
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
-
- if (!intel_edp_psr_match_conditions(intel_dp) ||
- intel_edp_is_psr_enabled(dev))
- return;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
- /* Setup PSR once */
- intel_edp_psr_setup(intel_dp);
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ WARN_ON(dev_priv->psr.active);
+ lockdep_assert_held(&dev_priv->psr.lock);
/* Enable PSR on the panel */
intel_edp_psr_enable_sink(intel_dp);
/* Enable PSR on the host */
intel_edp_psr_enable_source(intel_dp);
+
+ dev_priv->psr.active = true;
}
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_PSR(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ return;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ return;
+ }
- if (intel_edp_psr_match_conditions(intel_dp) &&
- !intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
+ mutex_lock(&dev_priv->psr.lock);
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR already in use\n");
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp))
+ dev_priv->psr.enabled = intel_dp;
+ mutex_unlock(&dev_priv->psr.lock);
}
void intel_edp_psr_disable(struct intel_dp *intel_dp)
@@ -1867,36 +1892,136 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!intel_edp_is_psr_enabled(dev))
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
return;
+ }
+
+ if (dev_priv->psr.active) {
+ I915_WRITE(EDP_PSR_CTL(dev),
+ I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
- I915_WRITE(EDP_PSR_CTL(dev),
- I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
+ dev_priv->psr.active = false;
+ } else {
+ WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
+ }
+
+ dev_priv->psr.enabled = NULL;
+ mutex_unlock(&dev_priv->psr.lock);
- /* Wait till PSR is idle */
- if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
- EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
+ cancel_delayed_work_sync(&dev_priv->psr.work);
}
-void intel_edp_psr_update(struct drm_device *dev)
+static void intel_edp_psr_work(struct work_struct *work)
{
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp = NULL;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), psr.work.work);
+ struct intel_dp *intel_dp;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
- if (encoder->type == INTEL_OUTPUT_EDP) {
- intel_dp = enc_to_intel_dp(&encoder->base);
+ mutex_lock(&dev_priv->psr.lock);
+ intel_dp = dev_priv->psr.enabled;
- if (!is_edp_psr(dev))
- return;
+ if (!intel_dp)
+ goto unlock;
- if (!intel_edp_psr_match_conditions(intel_dp))
- intel_edp_psr_disable(intel_dp);
- else
- if (!intel_edp_is_psr_enabled(dev))
- intel_edp_psr_do_enable(intel_dp);
- }
+ /*
+ * The delayed work can race with an invalidate hence we need to
+ * recheck. Since psr_flush first clears this and then reschedules we
+ * won't ever miss a flush when bailing out here.
+ */
+ if (dev_priv->psr.busy_frontbuffer_bits)
+ goto unlock;
+
+ intel_edp_psr_do_enable(intel_dp);
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
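
The recheck above is what makes the delayed work safe against a racing invalidate. A minimal sketch of the intended call sequence from a frontbuffer-tracking caller, with the drawing function itself hypothetical:

    /* Hypothetical CPU-rendering path (e.g. fbdev blit or pwrite). */
    static void example_cpu_frontbuffer_draw(struct drm_device *dev,
                                             unsigned frontbuffer_bits)
    {
            /* Force a PSR exit and mark the planes busy first. */
            intel_edp_psr_invalidate(dev, frontbuffer_bits);

            /* ... CPU writes to the frontbuffer happen here ... */

            /*
             * Flush clears the busy bits and schedules psr.work; the
             * work handler rechecks busy_frontbuffer_bits under
             * psr.lock, so a racing invalidate just makes it bail.
             */
            intel_edp_psr_flush(dev, frontbuffer_bits);
    }
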
+
+static void intel_edp_psr_do_exit(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->psr.active) {
+ u32 val = I915_READ(EDP_PSR_CTL(dev));
+
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+
+ I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
+
+ dev_priv->psr.active = false;
+ }
+}
+
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ intel_edp_psr_do_exit(dev);
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+ dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ mutex_lock(&dev_priv->psr.lock);
+ if (!dev_priv->psr.enabled) {
+ mutex_unlock(&dev_priv->psr.lock);
+ return;
+ }
+
+ crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+ dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /*
+ * On Haswell, sprite plane updates don't generate a PSR-invalidating
+ * signal in the hardware, which means we need to fake this manually in
+ * software for all flushes, not just when we've seen a preceding
+ * invalidation through frontbuffer rendering.
+ */
+ if (IS_HASWELL(dev) &&
+ (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+ intel_edp_psr_do_exit(dev);
+
+ if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->psr.work,
+ msecs_to_jiffies(100));
+ mutex_unlock(&dev_priv->psr.lock);
+}
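
Given the Haswell quirk described above, a sprite update must be flushed even though no invalidate preceded it. A short sketch of what that call looks like; the surrounding update hook is hypothetical, while INTEL_FRONTBUFFER_SPRITE() is the real tracking macro:

    static void example_sprite_update_done(struct drm_device *dev,
                                           enum pipe pipe)
    {
            /*
             * No invalidate was issued for the sprite, so on HSW this
             * flush alone is what forces the PSR exit before the work
             * item re-enables PSR ~100ms later.
             */
            intel_edp_psr_flush(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
    }
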
+
+void intel_edp_psr_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
+ mutex_init(&dev_priv->psr.lock);
}
static void intel_disable_dp(struct intel_encoder *encoder)
@@ -2152,6 +2277,70 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
vlv_wait_port_ready(dev_priv, dport);
}
+static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+ * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
/*
* Native read with retry for link status and receiver capability reads for
* cases where the sink may still be asleep.
@@ -2189,18 +2378,14 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
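
The retry mentioned in the comment exists because a sink in a low-power state may not ACK the first AUX transactions after wake-up. A generic sketch of the pattern under that assumption — the helper name and retry count here are illustrative:

    /* Retry a DPCD read a few times while the sink wakes up. */
    static ssize_t example_dpcd_read_wake(struct drm_dp_aux *aux,
                                          unsigned int offset,
                                          void *buffer, size_t size)
    {
            ssize_t ret;
            int i;

            for (i = 0; i < 3; i++) {
                    ret = drm_dp_dpcd_read(aux, offset, buffer, size);
                    if (ret == size)
                            return ret;
                    msleep(1);
            }
            return ret;
    }
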
-/*
- * These are source-specific values; current Intel hardware supports
- * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
- */
-
+/* These are source-specific values. */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev))
+ if (IS_VALLEYVIEW(dev))
return DP_TRAIN_VOLTAGE_SWING_1200;
else if (IS_GEN7(dev) && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_800;
@@ -2216,18 +2401,7 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROADWELL(dev)) {
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
@@ -2699,41 +2873,6 @@ intel_hsw_signal_levels(uint8_t train_set)
}
}
-static uint32_t
-intel_bdw_signal_levels(uint8_t train_set)
-{
- int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- switch (signal_levels) {
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */
- case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */
-
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */
- case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
- return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */
-
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */
- case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
- return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */
-
- case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
- return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */
-
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
- "0x%x\n", signal_levels);
- return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */
- }
-}
-
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
@@ -2744,10 +2883,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
- if (IS_BROADWELL(dev)) {
- signal_levels = intel_bdw_signal_levels(train_set);
- mask = DDI_BUF_EMP_MASK;
- } else if (IS_HASWELL(dev)) {
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev)) {
@@ -3246,6 +3382,33 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
edp_panel_vdd_off(intel_dp, false);
}
+static bool
+intel_dp_probe_mst(struct intel_dp *intel_dp)
+{
+ u8 buf[1];
+
+ if (!intel_dp->can_mst)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
+ return false;
+
+ _edp_panel_vdd_on(intel_dp);
+ if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
+ if (buf[0] & DP_MST_CAP) {
+ DRM_DEBUG_KMS("Sink is MST capable\n");
+ intel_dp->is_mst = true;
+ } else {
+ DRM_DEBUG_KMS("Sink is not MST capable\n");
+ intel_dp->is_mst = false;
+ }
+ }
+ edp_panel_vdd_off(intel_dp, false);
+
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ return intel_dp->is_mst;
+}
+
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -3283,6 +3446,20 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
sink_irq_vector, 1) == 1;
}
+static bool
+intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+{
+ int ret;
+
+ ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
+ DP_SINK_COUNT_ESI,
+ sink_irq_vector, 14);
+ if (ret != 14)
+ return false;
+
+ return true;
+}
+
static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
@@ -3290,6 +3467,63 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
}
+static int
+intel_dp_check_mst_status(struct intel_dp *intel_dp)
+{
+ bool bret;
+
+ if (intel_dp->is_mst) {
+ u8 esi[16] = { 0 };
+ int ret = 0;
+ int retry;
+ bool handled;
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+go_again:
+ if (bret) {
+ /* check link status - esi[10] is DP_LANE0_1_STATUS_ESI (0x200c) */
+ if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
+ DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ DRM_DEBUG_KMS("got esi %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+
+ if (handled) {
+ for (retry = 0; retry < 3; retry++) {
+ int wret;
+ wret = drm_dp_dpcd_write(&intel_dp->aux,
+ DP_SINK_COUNT_ESI+1,
+ &esi[1], 3);
+ if (wret == 3) {
+ break;
+ }
+ }
+
+ bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
+ if (bret) {
+ DRM_DEBUG_KMS("got esi2 %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ goto go_again;
+ }
+ } else
+ ret = 0;
+
+ return ret;
+ } else {
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ /* send a hotplug event */
+ drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
+ }
+ }
+ return -EINVAL;
+}
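
Since the ESI block is read starting at DP_SINK_COUNT_ESI (DPCD 0x2002), buffer indices map one-to-one onto DPCD addresses. A small helper sketch making that arithmetic explicit; the function is illustrative, the addresses are the standard DPCD ESI offsets:

    /* Map an esi[] buffer index back to its DPCD address. */
    static inline unsigned int esi_index_to_dpcd_addr(int index)
    {
            /*
             * Index 10 lands on 0x200c, DP_LANE0_1_STATUS_ESI, which
             * is why drm_dp_channel_eq_ok() is handed &esi[10] above.
             */
            return 0x2002 /* DP_SINK_COUNT_ESI */ + index;
    }
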
+
/*
* According to DP spec
* 5.1.2:
@@ -3298,15 +3532,16 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
* 3. Use Link Training from 2.5.3.3 and 3.5.1.3
* 4. Check link status on receipt of hot-plug interrupt
*/
-
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
- /* FIXME: This access isn't protected by any locks. */
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
if (!intel_encoder->connectors_active)
return;
@@ -3518,8 +3753,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
struct edid *edid = NULL;
-
- intel_runtime_pm_get(dev_priv);
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain);
@@ -3527,6 +3761,14 @@ intel_dp_detect(struct drm_connector *connector, bool force)
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
+ if (intel_dp->is_mst) {
+ /* MST devices are disconnected from a monitor POV */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
@@ -3539,6 +3781,16 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_dp_probe_oui(intel_dp);
+ ret = intel_dp_probe_mst(intel_dp);
+ if (ret) {
+ /*
+ * If we are in MST mode then this connector
+ * won't appear connected or have anything with EDID on it.
+ */
+ if (intel_encoder->type != INTEL_OUTPUT_EDP)
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ status = connector_status_disconnected;
+ goto out;
+ }
+
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
@@ -3555,9 +3807,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, power_domain);
-
- intel_runtime_pm_put(dev_priv);
-
return status;
}
@@ -3734,6 +3983,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
drm_dp_aux_unregister(&intel_dp->aux);
+ intel_dp_mst_encoder_cleanup(intel_dig_port);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
@@ -3766,12 +4016,64 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
.destroy = intel_dp_encoder_destroy,
};
-static void
+void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
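+ /* Intentionally empty - hotplug is now handled through the hpd_pulse hook. */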
+ return;
+}
+
+bool
+intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
+ intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
+
+ DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,
+ long_hpd ? "long" : "short");
+
+ if (long_hpd) {
+ if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
+ goto mst_fail;
- intel_dp_check_link_status(intel_dp);
+ if (!intel_dp_get_dpcd(intel_dp)) {
+ goto mst_fail;
+ }
+
+ intel_dp_probe_oui(intel_dp);
+
+ if (!intel_dp_probe_mst(intel_dp))
+ goto mst_fail;
+
+ } else {
+ if (intel_dp->is_mst) {
+ ret = intel_dp_check_mst_status(intel_dp);
+ if (ret == -EINVAL)
+ goto mst_fail;
+ }
+
+ if (!intel_dp->is_mst) {
+ /*
+ * We'll check the link status via the normal hotplug path later,
+ * but for short HPDs we should check it now.
+ */
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ intel_dp_check_link_status(intel_dp);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
+ }
+ return false;
+mst_fail:
+ /* if we were in MST mode and the device is gone, get out of MST mode */
+ if (intel_dp->is_mst) {
+ DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
+ intel_dp->is_mst = false;
+ drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
+ }
+ return true;
}
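
The hpd_pulse hook is stashed per digital port in intel_dp_init() below, so the interrupt code can hand off pulses without knowing about DP internals. A sketch of a dispatch site under that assumption; the surrounding handler is hypothetical:

    /* Hypothetical HPD bottom-half fragment dispatching a pulse. */
    static void example_dig_port_irq(struct drm_i915_private *dev_priv,
                                     enum port port, bool long_hpd)
    {
            struct intel_digital_port *dig_port =
                    dev_priv->hpd_irq_port[port];

            if (!dig_port || !dig_port->hpd_pulse)
                    return;

            /*
             * A true return means MST handling failed and the normal
             * single-stream hotplug path should take over.
             */
            if (dig_port->hpd_pulse(dig_port, long_hpd))
                    queue_work(dev_priv->wq, &dev_priv->hotplug_work);
    }
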
/* Return which DP Port should be selected for Transcoder DP control */
@@ -3822,7 +4124,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
return false;
}
-static void
+void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -4035,6 +4337,11 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
return;
}
+ /*
+ * FIXME: This needs proper synchronization with psr state, but that's
+ * really hard to tell without seeing the user of this code.
+ * Check locking and ordering once that lands.
+ */
if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
return;
@@ -4288,7 +4595,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
edp_panel_vdd_work);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -4321,7 +4628,12 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp, intel_connector);
- intel_dp->psr_setup_done = false;
+ /* init MST on ports that can support it */
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ if (port == PORT_B || port == PORT_C || port == PORT_D) {
+ intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id);
+ }
+ }
if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
drm_dp_aux_unregister(&intel_dp->aux);
@@ -4331,7 +4643,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
edp_panel_vdd_off_sync(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
return false;
}
@@ -4353,6 +4665,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -4379,6 +4692,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
intel_encoder->pre_enable = chv_pre_enable_dp;
intel_encoder->enable = vlv_enable_dp;
intel_encoder->post_disable = chv_post_disable_dp;
@@ -4408,9 +4722,55 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_encoder->cloneable = 0;
intel_encoder->hot_plug = intel_dp_hot_plug;
+ intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
+ dev_priv->hpd_irq_port[port] = intel_dig_port;
+
if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(intel_connector);
}
}
+
+void intel_dp_mst_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* disable MST */
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+ if (intel_dig_port->dp.is_mst)
+ drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+ }
+ }
+}
+
+void intel_dp_mst_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PORTS; i++) {
+ struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
+ if (!intel_dig_port)
+ continue;
+ if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+ int ret;
+
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+ if (ret != 0) {
+ intel_dp_check_mst_status(&intel_dig_port->dp);
+ }
+ }
+ }
+}
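
These walk every port rather than every connector because the topology manager lives on the primary DP port. A sketch of where they would sit in the driver's PM callbacks; the function names here are illustrative placeholders:

    static int example_driver_suspend(struct drm_device *dev)
    {
            /* Quiesce MST sideband traffic before the links drop. */
            intel_dp_mst_suspend(dev);
            /* ... save state, power down ... */
            return 0;
    }

    static int example_driver_resume(struct drm_device *dev)
    {
            /* ... power up, restore state ... */
            /* Re-sync topologies; failures fall back to a re-probe. */
            intel_dp_mst_resume(dev);
            return 0;
    }
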
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
new file mode 100644
index 000000000000..d9a7a7865f66
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ * 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drmP.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ int bpp;
+ int lane_count, slots;
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+ struct intel_connector *found = NULL, *intel_connector;
+ int mst_pbn;
+
+ pipe_config->dp_encoder_is_mst = true;
+ pipe_config->has_pch_encoder = false;
+ pipe_config->has_dp_encoder = true;
+ bpp = 24;
+ /*
+ * For MST we always configure the max link bw - the spec doesn't
+ * seem to suggest we should do otherwise.
+ */
+ lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+ intel_dp->lane_count = lane_count;
+
+ pipe_config->pipe_bpp = 24;
+ pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return false;
+ }
+
+ mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
+
+ pipe_config->pbn = mst_pbn;
+ slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
+
+ intel_link_compute_m_n(bpp, lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n);
+
+ pipe_config->dp_m_n.tu = slots;
+ return true;
+}
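
drm_dp_calc_pbn_mode() above turns a pixel clock and bpp into PBN, where one PBN unit corresponds to 54/64 MB/s and a 0.6% margin is added for spread-spectrum clocking. A rough userspace re-derivation of that arithmetic — approximate, since the real helper uses fixed-point rounding:

    #include <stdio.h>

    /* Approximate PBN for a mode: pixel clock in kHz, bpp in bits. */
    static int approx_pbn(int clock_khz, int bpp)
    {
            /* bytes/s = clock * bpp / 8; 1 PBN = 54/64 MB/s; +0.6% SSC margin */
            double mb_per_s = (double)clock_khz * bpp / 8.0 / 1000.0;
            return (int)(mb_per_s * 64.0 / 54.0 * 1.006 + 0.5);
    }

    int main(void)
    {
            /* 1920x1080@60 (148.5 MHz) at 24 bpp -> roughly 531 PBN */
            printf("%d\n", approx_pbn(148500, 24));
            return 0;
    }
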
+
+static void intel_mst_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, intel_mst->port);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ if (ret) {
+ DRM_ERROR("failed to update payload %d\n", ret);
+ }
+}
+
+static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ /* this can fail */
+ drm_dp_check_act_status(&intel_dp->mst_mgr);
+ /* and this can also fail */
+ drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+
+ drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, intel_mst->port);
+
+ intel_dp->active_mst_links--;
+ intel_mst->port = NULL;
+ if (intel_dp->active_mst_links == 0) {
+ intel_dig_port->base.post_disable(&intel_dig_port->base);
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ }
+}
+
+static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+ uint32_t temp;
+ struct intel_connector *found = NULL, *intel_connector;
+ int slots;
+ struct drm_crtc *crtc = encoder->base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+ if (intel_connector->new_encoder == encoder) {
+ found = intel_connector;
+ break;
+ }
+ }
+
+ if (!found) {
+ DRM_ERROR("can't find connector\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ intel_mst->port = found->port;
+
+ if (intel_dp->active_mst_links == 0) {
+ enum port port = intel_ddi_get_encoder_port(encoder);
+
+ I915_WRITE(PORT_CLK_SEL(port), intel_crtc->config.ddi_pll_sel);
+
+ intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
+
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_complete_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ }
+
+ ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
+ intel_mst->port, intel_crtc->config.pbn, &slots);
+ if (!ret) {
+ DRM_ERROR("failed to allocate vcpi\n");
+ return;
+ }
+
+ intel_dp->active_mst_links++;
+ temp = I915_READ(DP_TP_STATUS(port));
+ I915_WRITE(DP_TP_STATUS(port), temp);
+
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+}
+
+static void intel_mst_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum port port = intel_dig_port->port;
+ int ret;
+
+ DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+
+ if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT),
+ 1))
+ DRM_ERROR("Timed out waiting for ACT sent\n");
+
+ ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
+
+ ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+}
+
+static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ *pipe = intel_mst->pipe;
+ if (intel_mst->port)
+ return true;
+ return false;
+}
+
+static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
+ u32 temp, flags = 0;
+
+ pipe_config->has_dp_encoder = true;
+
+ temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ if (temp & TRANS_DDI_PHSYNC)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (temp & TRANS_DDI_PVSYNC)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+
+ switch (temp & TRANS_DDI_BPC_MASK) {
+ case TRANS_DDI_BPC_6:
+ pipe_config->pipe_bpp = 18;
+ break;
+ case TRANS_DDI_BPC_8:
+ pipe_config->pipe_bpp = 24;
+ break;
+ case TRANS_DDI_BPC_10:
+ pipe_config->pipe_bpp = 30;
+ break;
+ case TRANS_DDI_BPC_12:
+ pipe_config->pipe_bpp = 36;
+ break;
+ default:
+ break;
+ }
+ pipe_config->adjusted_mode.flags |= flags;
+ intel_dp_get_m_n(crtc, pipe_config);
+
+ intel_ddi_clock_get(&intel_dig_port->base, pipe_config);
+}
+
+static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ struct edid *edid;
+ int ret;
+
+ edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
+ if (!edid)
+ return 0;
+
+ ret = intel_connector_update_modes(connector, edid);
+ kfree(edid);
+
+ return ret;
+}
+
+static enum drm_connector_status
+intel_mst_port_dp_detect(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+
+ return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
+}
+
+static enum drm_connector_status
+intel_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status;
+ status = intel_mst_port_dp_detect(connector);
+ return status;
+}
+
+static int
+intel_dp_mst_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void
+intel_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
+ .dpms = intel_connector_dpms,
+ .detect = intel_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = intel_dp_mst_set_property,
+ .destroy = intel_dp_mst_connector_destroy,
+};
+
+static int intel_dp_mst_get_modes(struct drm_connector *connector)
+{
+ return intel_dp_mst_get_ddc_modes(connector);
+}
+
+static enum drm_mode_status
+intel_dp_mst_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* TODO - validate mode against available PBN for link */
+ if (mode->clock < 10000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
+ return &intel_dp->mst_encoders[0]->base.base;
+}
+
+static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
+ .get_modes = intel_dp_mst_get_modes,
+ .mode_valid = intel_dp_mst_mode_valid,
+ .best_encoder = intel_mst_best_encoder,
+};
+
+static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_mst);
+}
+
+static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
+ .destroy = intel_dp_mst_encoder_destroy,
+};
+
+static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
+{
+ if (connector->encoder) {
+ enum pipe pipe;
+ if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+ return false;
+ return true;
+ }
+ return false;
+}
+
+static void intel_connector_add_to_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
+{
+#ifdef CONFIG_DRM_I915_FBDEV
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper, &connector->base);
+#endif
+}
+
+static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ int i;
+
+ intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+ if (!intel_connector)
+ return NULL;
+
+ connector = &intel_connector->base;
+ drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
+
+ intel_connector->unregister = intel_connector_unregister;
+ intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->mst_port = intel_dp;
+ intel_connector->port = port;
+
+ for (i = PIPE_A; i <= PIPE_C; i++) {
+ drm_mode_connector_attach_encoder(&intel_connector->base,
+ &intel_dp->mst_encoders[i]->base.base);
+ }
+ intel_dp_add_properties(intel_dp, connector);
+
+ drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_mode_connector_set_path_property(connector, pathprop);
+ drm_reinit_primary_mode_group(dev);
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_add_to_fbdev(intel_connector);
+ mutex_unlock(&dev->mode_config.mutex);
+ drm_connector_register(&intel_connector->base);
+ return connector;
+}
+
+static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
+ /* need to nuke the connector */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ intel_connector->unregister(intel_connector);
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_connector_remove_from_fbdev(intel_connector);
+ drm_connector_cleanup(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ drm_reinit_primary_mode_group(dev);
+
+ kfree(intel_connector);
+ DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static struct drm_dp_mst_topology_cbs mst_cbs = {
+ .add_connector = intel_dp_add_mst_connector,
+ .destroy_connector = intel_dp_destroy_mst_connector,
+ .hotplug = intel_dp_mst_hotplug,
+};
+
+static struct intel_dp_mst_encoder *
+intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
+{
+ struct intel_dp_mst_encoder *intel_mst;
+ struct intel_encoder *intel_encoder;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+
+ intel_mst = kzalloc(sizeof(*intel_mst), GFP_KERNEL);
+
+ if (!intel_mst)
+ return NULL;
+
+ intel_mst->pipe = pipe;
+ intel_encoder = &intel_mst->base;
+ intel_mst->primary = intel_dig_port;
+
+ drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
+ DRM_MODE_ENCODER_DPMST);
+
+ intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->crtc_mask = 0x7;
+ intel_encoder->cloneable = 0;
+
+ intel_encoder->compute_config = intel_dp_mst_compute_config;
+ intel_encoder->disable = intel_mst_disable_dp;
+ intel_encoder->post_disable = intel_mst_post_disable_dp;
+ intel_encoder->pre_enable = intel_mst_pre_enable_dp;
+ intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
+ intel_encoder->get_config = intel_dp_mst_enc_get_config;
+
+ return intel_mst;
+}
+
+static bool
+intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
+{
+ int i;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ for (i = PIPE_A; i <= PIPE_C; i++)
+ intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+ return true;
+}
+
+int
+intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ int ret;
+
+ intel_dp->can_mst = true;
+ intel_dp->mst_mgr.cbs = &mst_cbs;
+
+ /* create encoders */
+ intel_dp_create_fake_mst_encoders(intel_dig_port);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
+ if (ret) {
+ intel_dp->can_mst = false;
+ return ret;
+ }
+ return 0;
+}
+
+void
+intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ if (!intel_dp->can_mst)
+ return;
+
+ drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
+ /* encoders will get killed by normal cleanup */
+}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f67340ed2c12..8a475a6909c3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,7 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
/**
* _wait_for - magic (register) wait macro
@@ -100,6 +100,7 @@
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
+#define INTEL_OUTPUT_DP_MST 11
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@@ -165,6 +166,7 @@ struct intel_panel {
struct {
bool present;
u32 level;
+ u32 min;
u32 max;
bool enabled;
bool combination_mode; /* gen 2/4 only */
@@ -207,6 +209,10 @@ struct intel_connector {
/* since POLL and HPD connectors may use the same HPD line keep the native
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
+
+ void *port; /* stored as an opaque pointer since it's illegal to dereference it */
+
+ struct intel_dp *mst_port;
};
typedef struct dpll {
@@ -307,6 +313,9 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;
+ /* PORT_CLK_SEL for DDI ports. */
+ uint32_t ddi_pll_sel;
+
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
@@ -338,6 +347,7 @@ struct intel_crtc_config {
u32 pos;
u32 size;
bool enabled;
+ bool force_thru;
} pch_pfit;
/* FDI configuration, only valid if has_pch_encoder is set. */
@@ -347,6 +357,9 @@ struct intel_crtc_config {
bool ips_enabled;
bool double_wide;
+
+ bool dp_encoder_is_mst;
+ int pbn;
};
struct intel_pipe_wm {
@@ -358,6 +371,11 @@ struct intel_pipe_wm {
bool sprites_scaled;
};
+struct intel_mmio_flip {
+ u32 seqno;
+ u32 ring_id;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -384,7 +402,6 @@ struct intel_crtc {
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
- int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_base;
@@ -394,8 +411,6 @@ struct intel_crtc {
struct intel_crtc_config *new_config;
bool new_enabled;
- uint32_t ddi_pll_sel;
-
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
@@ -412,10 +427,12 @@ struct intel_crtc {
wait_queue_head_t vbl_wait;
int scanline_offset;
+ struct intel_mmio_flip mmio_flip;
};
struct intel_plane_wm_parameters {
uint32_t horiz_pixels;
+ uint32_t vert_pixels;
uint8_t bytes_per_pixel;
bool enabled;
bool scaled;
@@ -428,7 +445,6 @@ struct intel_plane {
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
- u32 lut_r[1024], lut_g[1024], lut_b[1024];
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
@@ -481,6 +497,7 @@ struct cxsr_latency {
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
+#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
u32 hdmi_reg;
@@ -491,6 +508,7 @@ struct intel_hdmi {
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
+ enum hdmi_picture_aspect aspect_ratio;
void (*write_infoframe)(struct drm_encoder *encoder,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
@@ -499,6 +517,7 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
+struct intel_dp_mst_encoder;
#define DP_MAX_DOWNSTREAM_PORTS 0x10
/**
@@ -537,12 +556,20 @@ struct intel_dp {
unsigned long last_power_cycle;
unsigned long last_power_on;
unsigned long last_backlight_off;
- bool psr_setup_done;
+
struct notifier_block edp_notifier;
bool use_tps3;
+ bool can_mst; /* this port supports mst */
+ bool is_mst;
+ int active_mst_links;
+ /* connector directly attached - won't be used for modesetting in the MST world */
struct intel_connector *attached_connector;
+ /* mst connector list */
+ struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
+ struct drm_dp_mst_topology_mgr mst_mgr;
+
uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
@@ -566,6 +593,14 @@ struct intel_digital_port {
u32 saved_port_bits;
struct intel_dp dp;
struct intel_hdmi hdmi;
+ bool (*hpd_pulse)(struct intel_digital_port *, bool);
+};
+
+struct intel_dp_mst_encoder {
+ struct intel_encoder base;
+ enum pipe pipe;
+ struct intel_digital_port *primary;
+ void *port; /* stored as an opaque pointer since it's illegal to dereference it */
};
static inline int
@@ -652,6 +687,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return container_of(encoder, struct intel_digital_port, base.base);
}
+static inline struct intel_dp_mst_encoder *
+enc_to_mst(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct intel_dp_mst_encoder, base.base);
+}
+
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
return &enc_to_dig_port(encoder)->dp;
@@ -676,17 +717,26 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
+static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
+{
+ /*
+ * We only use drm_irq_uninstall() at unload and VT switch, so
+ * this is the only thing we need to check.
+ */
+ return !dev_priv->pm._irqs_disabled;
+}
+
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
-
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
/* intel_crt.c */
void intel_crt_init(struct drm_device *dev);
@@ -705,10 +755,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
bool intel_ddi_pll_select(struct intel_crtc *crtc);
-void intel_ddi_pll_enable(struct intel_crtc *crtc);
-void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -716,17 +763,46 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
+void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
+void intel_ddi_clock_get(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config);
+void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
-int valleyview_cur_cdclk(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_device *dev);
-void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+ struct intel_engine_cs *ring);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_frontbuffer_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+/**
+ * intel_frontbuffer_flip - prepare frontbuffer flip
+ * @dev: DRM device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called after scheduling a flip on the planes given by
+ * @frontbuffer_bits. This is for synchronous plane updates which will happen
+ * on the next vblank and which will not get delayed by pending gpu rendering.
+ *
+ * Can be called without any locks held.
+ */
+static inline
+void intel_frontbuffer_flip(struct drm_device *dev,
+ unsigned frontbuffer_bits)
+{
+ intel_frontbuffer_flush(dev, frontbuffer_bits);
+}
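
A sketch of how a synchronous plane update is expected to use the flip hooks declared above: prepare before arming the update, then flip_complete (or, as the inline shows, a plain flush) once it has happened. The update function itself is hypothetical:

    static void example_sync_plane_update(struct drm_device *dev,
                                          unsigned frontbuffer_bits)
    {
            intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

            /* ... write plane registers; latched at the next vblank ... */

            intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
    }
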
+
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_control(struct drm_crtc *crtc, bool enable);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
void intel_connector_dpms(struct drm_connector *, int mode);
@@ -767,12 +843,18 @@ __intel_framebuffer_create(struct drm_device *dev,
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+
+/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
+void intel_put_shared_dpll(struct intel_crtc *crtc);
+
+/* modesetting asserts */
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -805,7 +887,6 @@ void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-int valleyview_get_vco(struct drm_i915_private *dev_priv);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
@@ -826,6 +907,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
bool intel_dp_is_edp(struct drm_device *dev, enum port port);
+bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -833,11 +916,24 @@ void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
-void intel_edp_psr_update(struct drm_device *dev);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-
+void intel_edp_psr_invalidate(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_flush(struct drm_device *dev,
+ unsigned frontbuffer_bits);
+void intel_edp_psr_init(struct drm_device *dev);
+
+int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
+void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
+void intel_dp_mst_suspend(struct drm_device *dev);
+void intel_dp_mst_resume(struct drm_device *dev);
+int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
+/* intel_dp_mst.c */
+int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
+void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
-bool intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_device *dev);
/* intel_dvo.c */
@@ -920,8 +1016,8 @@ void intel_pch_panel_fitting(struct intel_crtc *crtc,
void intel_gmch_panel_fitting(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config,
int fitting_mode);
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max);
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
@@ -940,7 +1036,9 @@ int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
@@ -963,6 +1061,7 @@ void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
void intel_disable_gt_powersave(struct drm_device *dev);
+void intel_suspend_gt_powersave(struct drm_device *dev);
void intel_reset_gt_powersave(struct drm_device *dev);
void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
@@ -976,8 +1075,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable);
+
/* intel_sdvo.c */
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 3fd082933c87..bfcefbf33709 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -658,7 +658,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
};
-bool intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
@@ -674,29 +674,29 @@ bool intel_dsi_init(struct drm_device *dev)
/* There is no detection method for MIPI so rely on VBT */
if (!dev_priv->vbt.has_mipi)
- return false;
+ return;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
+ } else {
+ DRM_ERROR("Unsupported Mipi device to reg base");
+ return;
+ }
intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
if (!intel_dsi)
- return false;
+ return;
intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dsi);
- return false;
+ return;
}
intel_encoder = &intel_dsi->base;
encoder = &intel_encoder->base;
intel_dsi->attached_connector = intel_connector;
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else {
- DRM_ERROR("Unsupported Mipi device to reg base");
- return false;
- }
-
connector = &intel_connector->base;
drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
@@ -743,7 +743,7 @@ bool intel_dsi_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
if (!fixed_mode) {
@@ -754,12 +754,10 @@ bool intel_dsi_init(struct drm_device *dev)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- return true;
+ return;
err:
drm_encoder_cleanup(&intel_encoder->base);
kfree(intel_dsi);
kfree(intel_connector);
-
- return false;
}
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 21a0d348cedc..47c7584a4aa0 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -143,7 +143,7 @@ static u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, u8 *data)
case MIPI_DSI_DCS_LONG_WRITE:
dsi_vc_dcs_write(intel_dsi, vc, data, len);
break;
- };
+ }
data += len;
@@ -294,7 +294,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
- intel_dsi->video_frmt_cfg_bits = mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
switch (intel_dsi->escape_clk_div) {
case 0:
@@ -351,7 +352,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
*
* prepare count
*/
- ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
/* exit zero count */
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a3631c0a5c28..56b47d2ffaf7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -112,7 +112,15 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
+ u32 tmp;
+
+ tmp = I915_READ(intel_dvo->dev.dvo_reg);
+
+ if (!(tmp & DVO_ENABLE))
+ return false;
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
@@ -558,7 +566,7 @@ void intel_dvo_init(struct drm_device *dev)
intel_dvo->panel_wants_dither = true;
}
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 088fe9378a4c..f475414671d8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -43,10 +43,36 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static int intel_fbdev_set_par(struct fb_info *info)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct intel_fbdev *ifbdev =
+ container_of(fb_helper, struct intel_fbdev, helper);
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+
+ if (ret == 0) {
+ /*
+ * FIXME: fbdev presumes that all callbacks also work from
+ * atomic contexts and relies on that for emergency oops
+ * printing. KMS totally doesn't do that and the locking here is
+ * by far not the only place this goes wrong. Ignore this for
+ * now until we solve this for real.
+ */
+ mutex_lock(&fb_helper->dev->struct_mutex);
+ ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
+ true);
+ mutex_unlock(&fb_helper->dev->struct_mutex);
+ }
+
+ return ret;
+}
+
static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ .fb_set_par = intel_fbdev_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
@@ -81,7 +107,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
sizes->surface_depth);
size = mode_cmd.pitches[0] * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ size = PAGE_ALIGN(size);
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@@ -417,7 +443,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
}
crtcs[i] = new_crtc;
- DRM_DEBUG_KMS("connector %s on pipe %d [CRTC:%d]: %dx%d%s\n",
+ DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n",
connector->name,
pipe_name(to_intel_crtc(encoder->crtc)->pipe),
encoder->crtc->base.id,
@@ -452,7 +478,7 @@ out:
return true;
}
-static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.initial_config = intel_fb_initial_config,
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
@@ -623,7 +649,8 @@ int intel_fbdev_init(struct drm_device *dev)
if (ifbdev == NULL)
return -ENOMEM;
- ifbdev->helper.funcs = &intel_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index eee2bbec2958..f9151f6641d9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -367,6 +367,9 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
union hdmi_infoframe frame;
int ret;
+ /* Set user selected PAR to incoming mode's member */
+ adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
adjusted_mode);
if (ret < 0) {
@@ -879,7 +882,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
struct intel_encoder *encoder;
int count = 0, count_hdmi = 0;
- if (!HAS_PCH_SPLIT(dev))
+ if (HAS_GMCH_DISPLAY(dev))
return false;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
@@ -1124,6 +1127,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == connector->dev->mode_config.aspect_ratio_property) {
+ switch (val) {
+ case DRM_MODE_PICTURE_ASPECT_NONE:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_4_3:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
+ break;
+ case DRM_MODE_PICTURE_ASPECT_16_9:
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -1229,6 +1249,70 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
}
+static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(encoder->base.crtc);
+ enum dpio_channel ch = vlv_dport_to_channel(dport);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ /* program left/right clock distribution */
+ if (pipe != PIPE_B) {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA1_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA1_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ } else {
+ val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
+ if (ch == DPIO_CH0)
+ val |= CHV_BUFLEFTENA2_FORCE;
+ if (ch == DPIO_CH1)
+ val |= CHV_BUFRIGHTENA2_FORCE;
+ vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ }
+
+ /* program clock channel usage */
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
+ if (pipe != PIPE_B)
+ val &= ~CHV_PCS_USEDCLKCHANNEL;
+ else
+ val |= CHV_PCS_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+
+ /*
+	 * This is a bit weird since generally CL
+ * matches the pipe, but here we need to
+ * pick the CL based on the port.
+ */
+ val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ if (pipe != PIPE_B)
+ val &= ~CHV_CMN_USEDCLKCHANNEL;
+ else
+ val |= CHV_CMN_USEDCLKCHANNEL;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
@@ -1416,11 +1500,22 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
};
static void
+intel_attach_aspect_ratio_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_aspect_ratio_property(connector->dev))
+ drm_object_attach_property(&connector->base,
+ connector->dev->mode_config.aspect_ratio_property,
+ DRM_MODE_PICTURE_ASPECT_NONE);
+}
+
+static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
+ intel_attach_aspect_ratio_property(connector);
+ intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1467,7 +1562,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
- } else if (!HAS_PCH_SPLIT(dev)) {
+ } else if (IS_G4X(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (HAS_DDI(dev)) {
@@ -1490,7 +1585,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
@@ -1528,6 +1623,7 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_CHERRYVIEW(dev)) {
+ intel_encoder->pre_pll_enable = chv_hdmi_pre_pll_enable;
intel_encoder->pre_enable = chv_hdmi_pre_enable;
intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = chv_hdmi_post_disable;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d33b61d0dd33..b31088a551f2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -34,11 +34,6 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-enum disp_clk {
- CDCLK,
- CZCLK
-};
-
struct gmbus_port {
const char *name;
int reg;
@@ -63,60 +58,11 @@ to_intel_gmbus(struct i2c_adapter *i2c)
return container_of(i2c, struct intel_gmbus, adapter);
}
-static int get_disp_clk_div(struct drm_i915_private *dev_priv,
- enum disp_clk clk)
-{
- u32 reg_val;
- int clk_ratio;
-
- reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
-
- if (clk == CDCLK)
- clk_ratio =
- ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
- else
- clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
-
- return clk_ratio;
-}
-
-static void gmbus_set_freq(struct drm_i915_private *dev_priv)
-{
- int vco, gmbus_freq = 0, cdclk_div;
-
- BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
-
- vco = valleyview_get_vco(dev_priv);
-
- /* Get the CDCLK divide ratio */
- cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
-
- /*
- * Program the gmbus_freq based on the cdclk frequency.
- * BSpec erroneously claims we should aim for 4MHz, but
- * in fact 1MHz is the correct frequency.
- */
- if (cdclk_div)
- gmbus_freq = (vco << 1) / cdclk_div;
-
- if (WARN_ON(gmbus_freq == 0))
- return;
-
- I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
-}
-
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- /*
- * In BIOS-less system, program the correct gmbus frequency
- * before reading edid.
- */
- if (IS_VALLEYVIEW(dev))
- gmbus_set_freq(dev_priv);
-
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 5e5a72fca5fb..881361c0f27e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,6 +51,7 @@ struct intel_lvds_encoder {
bool is_dual_link;
u32 reg;
+ u32 a3_power;
struct intel_lvds_connector *attached_connector;
};
@@ -71,8 +72,13 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ enum intel_display_power_domain power_domain;
u32 tmp;
+ power_domain = intel_display_port_power_domain(encoder);
+ if (!intel_display_power_enabled(dev_priv, power_domain))
+ return false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
@@ -172,8 +178,11 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
- * panels behave in the two modes.
+ * panels behave in the two modes. For now, let's just maintain the
+ * value we got from the BIOS.
*/
+ temp &= ~LVDS_A3_POWER_MASK;
+ temp |= lvds_encoder->a3_power;
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
@@ -271,7 +280,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_device *dev = intel_encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
@@ -286,8 +294,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return false;
}
- if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) ==
- LVDS_A3_POWER_UP)
+ if (lvds_encoder->a3_power == LVDS_A3_POWER_UP)
lvds_bpp = 8*3;
else
lvds_bpp = 6*3;
@@ -1088,6 +1095,9 @@ out:
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
+ lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
+ LVDS_A3_POWER_MASK;
+
/*
* Unlock registers and just
* leave them unlocked
@@ -1104,7 +1114,7 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
lvds_connector->lid_notifier.notifier_call = NULL;
}
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4f6b53998d79..ca52ad2ae7d1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -352,6 +352,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
case INTEL_OUTPUT_UNKNOWN:
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_HDMI:
+ case INTEL_OUTPUT_DP_MST:
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
break;
case INTEL_OUTPUT_EDP:
@@ -427,7 +428,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head)
- intel_panel_set_backlight(intel_connector, bclp, 255);
+ intel_panel_set_backlight_acpi(intel_connector, bclp, 255);
iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index daa118978eec..dc2f4f26c961 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -415,6 +415,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
}
intel_overlay_release_old_vid_tail(overlay);
+
+ i915_gem_track_fb(overlay->old_vid_bo, NULL,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
return 0;
}
@@ -686,6 +690,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
bool scale_changed = false;
struct drm_device *dev = overlay->dev;
u32 swidth, swidthsw, sheight, ostride;
+ enum pipe pipe = overlay->crtc->pipe;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
@@ -713,7 +718,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(overlay->dev))
oconfig |= OCONF_CSC_MODE_BT709;
- oconfig |= overlay->crtc->pipe == 0 ?
+ oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
intel_overlay_unmap_regs(overlay, regs);
@@ -776,9 +781,15 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret)
goto out_unpin;
+ i915_gem_track_fb(overlay->vid_bo, new_bo,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = new_bo;
+ intel_frontbuffer_flip(dev,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
return 0;
out_unpin:
@@ -1028,7 +1039,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_intel_overlay_put_image *put_image_rec = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_mode_object *drmmode_obj;
+ struct drm_crtc *drmmode_crtc;
struct intel_crtc *crtc;
struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
@@ -1057,13 +1068,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
if (!params)
return -ENOMEM;
- drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
- DRM_MODE_OBJECT_CRTC);
- if (!drmmode_obj) {
+ drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id);
+ if (!drmmode_crtc) {
ret = -ENOENT;
goto out_free;
}
- crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_intel_crtc(drmmode_crtc);
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 12b02fe1d0ae..59b028f0b1e8 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -398,6 +398,69 @@ intel_panel_detect(struct drm_device *dev)
}
}
+/**
+ * scale - scale values from one range to another
+ *
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum of the source range
+ * @source_max: maximum of the source range
+ * @target_min: minimum of the target range
+ * @target_max: maximum of the target range
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static uint32_t scale(uint32_t source_val,
+ uint32_t source_min, uint32_t source_max,
+ uint32_t target_min, uint32_t target_max)
+{
+ uint64_t target_val;
+
+ WARN_ON(source_min > source_max);
+ WARN_ON(target_min > target_max);
+
+ /* defensive */
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+ target_val = (uint64_t)(source_val - source_min) *
+ (target_max - target_min);
+ do_div(target_val, source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+}
+
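For reference, scale() is a plain linear map with a 64-bit intermediate to avoid
overflow; a standalone sketch of the same arithmetic in userspace C (hypothetical
ranges, not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* Same math as scale() above: clamp, widen to 64 bits, map. */
    static uint32_t scale_range(uint32_t v, uint32_t smin, uint32_t smax,
                                uint32_t tmin, uint32_t tmax)
    {
            uint64_t t;

            if (v < smin)
                    v = smin;
            if (v > smax)
                    v = smax;

            t = (uint64_t)(v - smin) * (tmax - tmin);
            t /= (smax - smin);        /* do_div() in the kernel version */
            return (uint32_t)(t + tmin);
    }

    int main(void)
    {
            /* e.g. a 0..255 user range mapped onto a 100..937 PWM range */
            assert(scale_range(0, 0, 255, 100, 937) == 100);
            assert(scale_range(255, 0, 255, 100, 937) == 937);
            assert(scale_range(128, 0, 255, 100, 937) == 520);
            return 0;
    }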
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static inline u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max]. */
+static inline u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+ hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+ return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static inline u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(hw_level, panel->backlight.min, panel->backlight.max,
+ 0, user_max);
+}
+
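The difference between scale_user_to_hw() and clamp_user_to_hw() only shows away
from the endpoints. With a hypothetical hw range [100..937] and user range
[0..255]: scale_user_to_hw(128) = 100 + 128*837/255 = 520, whereas
clamp_user_to_hw(128) = clamp(128*937/255, 100, 937) = 470. Both pin user level 0
at the hw minimum, but the clamping variant preserves the caller's absolute
scale, which is why the ACPI opregion path below uses it.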
static u32 intel_panel_compute_brightness(struct intel_connector *connector,
u32 val)
{
@@ -557,17 +620,16 @@ intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
dev_priv->display.set_backlight(connector, level);
}
-/* set backlight brightness to level in range [0..max] */
-void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
- u32 max)
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
{
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_panel *panel = &connector->panel;
enum pipe pipe = intel_get_pipe_from_connector(connector);
- u32 freq;
+ u32 hw_level;
unsigned long flags;
- u64 n;
if (!panel->backlight.present || pipe == INVALID_PIPE)
return;
@@ -576,18 +638,46 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level,
WARN_ON(panel->backlight.max == 0);
- /* scale to hardware max, but be careful to not overflow */
- freq = panel->backlight.max;
- n = (u64)level * freq;
- do_div(n, max);
- level = n;
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(connector, hw_level);
+
+ spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
+}
+
+/* set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_panel_set_backlight_acpi(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = intel_get_pipe_from_connector(connector);
+ u32 hw_level;
+ unsigned long flags;
+
+ if (!panel->backlight.present || pipe == INVALID_PIPE)
+ return;
+
+ spin_lock_irqsave(&dev_priv->backlight_lock, flags);
+
+ WARN_ON(panel->backlight.max == 0);
+
+ hw_level = clamp_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
- panel->backlight.level = level;
if (panel->backlight.device)
- panel->backlight.device->props.brightness = level;
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(connector, level);
+ intel_panel_actually_set_backlight(connector, hw_level);
spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
}
@@ -860,7 +950,9 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
- panel->backlight.level;
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
}
dev_priv->display.enable_backlight(connector);
@@ -889,11 +981,15 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 hw_level;
int ret;
intel_runtime_pm_get(dev_priv);
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- ret = intel_panel_get_backlight(connector);
+
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+
drm_modeset_unlock(&dev->mode_config.connection_mutex);
intel_runtime_pm_put(dev_priv);
@@ -913,12 +1009,19 @@ static int intel_backlight_device_register(struct intel_connector *connector)
if (WARN_ON(panel->backlight.device))
return -ENODEV;
- BUG_ON(panel->backlight.max == 0);
+ WARN_ON(panel->backlight.max == 0);
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
- props.brightness = panel->backlight.level;
+
+ /*
+ * Note: Everything should work even if the backlight device max
+	 * presented to userspace is arbitrarily chosen.
+ */
props.max_brightness = panel->backlight.max;
+ props.brightness = scale_hw_to_user(connector,
+ panel->backlight.level,
+ props.max_brightness);
/*
* Note: using the same name independent of the connector prevents
@@ -964,6 +1067,19 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
* XXX: Query mode clock or hardware clock and program PWM modulation frequency
* appropriately when it's 0. Use VBT and/or sane defaults.
*/
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ /* vbt value is a coefficient in range [0..255] */
+ return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
+ 0, panel->backlight.max);
+}
+
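Worked example (hypothetical numbers): with a VBT min_brightness coefficient of
26 and panel->backlight.max of 937, the effective minimum becomes
scale(26, 0, 255, 0, 937) = 26*937/255 = 95, i.e. roughly 10% of the full PWM
range.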
static int bdw_setup_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -979,6 +1095,8 @@ static int bdw_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bdw_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1003,6 +1121,8 @@ static int pch_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = pch_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1035,6 +1155,8 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1062,6 +1184,8 @@ static int i965_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = i9xx_get_backlight(connector);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
@@ -1099,6 +1223,8 @@ static int vlv_setup_backlight(struct intel_connector *connector)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = _vlv_get_backlight(dev, PIPE_A);
panel->backlight.level = intel_panel_compute_brightness(connector, val);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f1233f544f3e..c3bb925b2e65 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -93,8 +93,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int i;
@@ -150,8 +149,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
@@ -222,16 +220,26 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev))
dpfc_ctl |= obj->fence_reg;
@@ -278,16 +286,27 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->primary->fb;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 dpfc_ctl;
dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+ dev_priv->fbc.threshold++;
+
+ switch (dev_priv->fbc.threshold) {
+ case 4:
+ case 3:
+ dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+ break;
+ case 2:
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
- else
+ break;
+ case 1:
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+ break;
+ }
+
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -462,7 +481,6 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
- struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
const struct drm_display_mode *adjusted_mode;
unsigned int max_width, max_height;
@@ -507,8 +525,7 @@ void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->primary->fb;
- intel_fb = to_intel_framebuffer(fb);
- obj = intel_fb->obj;
+ obj = intel_fb_obj(fb);
adjusted_mode = &intel_crtc->config.adjusted_mode;
if (i915.enable_fbc < 0) {
@@ -529,7 +546,10 @@ void intel_update_fbc(struct drm_device *dev)
goto out_disable;
}
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+ if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+ max_width = 4096;
+ max_height = 4096;
+ } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
max_width = 4096;
max_height = 2048;
} else {
@@ -563,7 +583,8 @@ void intel_update_fbc(struct drm_device *dev)
if (in_dbg_master())
goto out_disable;
- if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+ if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+ drm_format_plane_cpp(fb->pixel_format, 0))) {
if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
@@ -789,12 +810,33 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
return NULL;
}
-static void pineview_disable_cxsr(struct drm_device *dev)
+void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = dev_priv->dev;
+ u32 val;
+
+ if (IS_VALLEYVIEW(dev)) {
+ I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+ } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+ I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+ } else if (IS_PINEVIEW(dev)) {
+ val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+ val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+ I915_WRITE(DSPFW3, val);
+ } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+ _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ I915_WRITE(FW_BLC_SELF, val);
+ } else if (IS_I915GM(dev)) {
+ val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+ _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ I915_WRITE(INSTPM, val);
+ } else {
+ return;
+ }
- /* deactivate cxsr */
- I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+ DRM_DEBUG_KMS("memory self-refresh is %s\n",
+ enable ? "enabled" : "disabled");
}
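The *_update_wm() paths below all bracket their register writes with this helper
in the same way; a minimal sketch of that pattern (compute_sr_wm() and
write_wm_regs() are stand-in names, not driver functions):

    static void update_wm_sketch(struct drm_i915_private *dev_priv)
    {
            bool cxsr_enabled;

            if (compute_sr_wm(dev_priv)) {
                    cxsr_enabled = true;
            } else {
                    /* self-refresh off before zeroed SR watermarks land */
                    cxsr_enabled = false;
                    intel_set_memory_cxsr(dev_priv, false);
            }

            write_wm_regs(dev_priv);

            if (cxsr_enabled)
                    intel_set_memory_cxsr(dev_priv, true);
    }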
/*
@@ -864,95 +906,95 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
- PINEVIEW_DISPLAY_FIFO,
- PINEVIEW_MAX_WM,
- PINEVIEW_DFT_HPLLOFF_WM,
- PINEVIEW_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_DISPLAY_FIFO,
+ .max_wm = PINEVIEW_MAX_WM,
+ .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+ .guard_size = PINEVIEW_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE,
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
- PINEVIEW_CURSOR_FIFO,
- PINEVIEW_CURSOR_MAX_WM,
- PINEVIEW_CURSOR_DFT_WM,
- PINEVIEW_CURSOR_GUARD_WM,
- PINEVIEW_FIFO_LINE_SIZE
+ .fifo_size = PINEVIEW_CURSOR_FIFO,
+ .max_wm = PINEVIEW_CURSOR_MAX_WM,
+ .default_wm = PINEVIEW_CURSOR_DFT_WM,
+ .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+ .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
- G4X_FIFO_SIZE,
- G4X_MAX_WM,
- G4X_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = G4X_FIFO_SIZE,
+ .max_wm = G4X_MAX_WM,
+ .default_wm = G4X_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
- VALLEYVIEW_FIFO_SIZE,
- VALLEYVIEW_MAX_WM,
- VALLEYVIEW_MAX_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = VALLEYVIEW_FIFO_SIZE,
+ .max_wm = VALLEYVIEW_MAX_WM,
+ .default_wm = VALLEYVIEW_MAX_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
- I965_CURSOR_FIFO,
- VALLEYVIEW_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- G4X_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
- I965_CURSOR_FIFO,
- I965_CURSOR_MAX_WM,
- I965_CURSOR_DFT_WM,
- 2,
- I915_FIFO_LINE_SIZE,
+ .fifo_size = I965_CURSOR_FIFO,
+ .max_wm = I965_CURSOR_MAX_WM,
+ .default_wm = I965_CURSOR_DFT_WM,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
- I945_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I945_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
- I915_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I915_FIFO_LINE_SIZE
+ .fifo_size = I915_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
- I855GM_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I855GM_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
- I830_FIFO_SIZE,
- I915_MAX_WM,
- 1,
- 2,
- I830_FIFO_LINE_SIZE
+ .fifo_size = I830_FIFO_SIZE,
+ .max_wm = I915_MAX_WM,
+ .default_wm = 1,
+ .guard_size = 2,
+ .cacheline_size = I830_FIFO_LINE_SIZE,
};
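The conversion above is purely mechanical, but worth the churn: designated
initializers tie each value to its field name, so the tables stay correct even
if intel_watermark_params is reordered or grows a member. A minimal illustration
with a made-up struct:

    struct wm_params {
            int fifo_size;
            int max_wm;
            int default_wm;
            int guard_size;
            int cacheline_size;
    };

    /* positional: correctness depends on remembering the field order */
    static const struct wm_params a = { 96, 64, 1, 2, 64 };

    /* designated: self-documenting, order-independent; omitted
     * fields are zero-initialized */
    static const struct wm_params b = {
            .fifo_size      = 96,
            .max_wm         = 64,
            .default_wm     = 1,
            .guard_size     = 2,
            .cacheline_size = 64,
    };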
/**
@@ -1033,7 +1075,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
return;
}
@@ -1084,13 +1126,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
- /* activate cxsr */
- I915_WRITE(DSPFW3,
- I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
- DRM_DEBUG_KMS("Self-refresh is enabled\n");
+ intel_set_memory_cxsr(dev_priv, true);
} else {
- pineview_disable_cxsr(dev);
- DRM_DEBUG_KMS("Self-refresh is disabled\n");
+ intel_set_memory_cxsr(dev_priv, false);
}
}
@@ -1316,6 +1354,7 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
vlv_update_drain_latency(dev);
@@ -1342,10 +1381,10 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF_VLV,
- I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1365,6 +1404,9 @@ static void valleyview_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void g4x_update_wm(struct drm_crtc *crtc)
@@ -1375,6 +1417,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
+ bool cxsr_enabled;
if (g4x_compute_wm0(dev, PIPE_A,
&g4x_wm_info, latency_ns,
@@ -1394,10 +1437,10 @@ static void g4x_update_wm(struct drm_crtc *crtc)
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr)) {
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
- I915_WRITE(FW_BLC_SELF,
- I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+ cxsr_enabled = false;
+ intel_set_memory_cxsr(dev_priv, false);
plane_sr = cursor_sr = 0;
}
@@ -1418,6 +1461,9 @@ static void g4x_update_wm(struct drm_crtc *crtc)
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
@@ -1427,6 +1473,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
+ bool cxsr_enabled;
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
@@ -1468,13 +1515,11 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ cxsr_enabled = true;
} else {
+ cxsr_enabled = false;
/* Turn off self refresh if both pipes are enabled */
- if (IS_CRESTLINE(dev))
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ intel_set_memory_cxsr(dev_priv, false);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -1486,6 +1531,9 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+
+ if (cxsr_enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
@@ -1545,12 +1593,12 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
if (IS_I915GM(dev) && enabled) {
- struct intel_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
- fb = to_intel_framebuffer(enabled->primary->fb);
+ obj = intel_fb_obj(enabled->primary->fb);
/* self-refresh seems busted with untiled */
- if (fb->obj->tiling_mode == I915_TILING_NONE)
+ if (obj->tiling_mode == I915_TILING_NONE)
enabled = NULL;
}
@@ -1560,10 +1608,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
cwm = 2;
/* Play safe and disable self-refresh before adjusting watermarks. */
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+ intel_set_memory_cxsr(dev_priv, false);
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
@@ -1609,17 +1654,8 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
- if (HAS_FW_BLC(dev)) {
- if (enabled) {
- if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF,
- FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- else if (IS_I915GM(dev))
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
- DRM_DEBUG_KMS("memory self refresh enabled\n");
- } else
- DRM_DEBUG_KMS("memory self refresh disabled\n");
- }
+ if (enabled)
+ intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
@@ -2707,10 +2743,11 @@ static void ilk_update_wm(struct drm_crtc *crtc)
ilk_write_wm_values(dev_priv, &results);
}
-static void ilk_update_sprite_wm(struct drm_plane *plane,
- struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
- bool enabled, bool scaled)
+static void
+ilk_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, uint32_t sprite_height,
+ int pixel_size, bool enabled, bool scaled)
{
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
@@ -2718,6 +2755,7 @@ static void ilk_update_sprite_wm(struct drm_plane *plane,
intel_plane->wm.enabled = enabled;
intel_plane->wm.scaled = scaled;
intel_plane->wm.horiz_pixels = sprite_width;
+	intel_plane->wm.vert_pixels = sprite_height;
intel_plane->wm.bytes_per_pixel = pixel_size;
/*
@@ -2852,13 +2890,16 @@ void intel_update_watermarks(struct drm_crtc *crtc)
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
- uint32_t sprite_width, int pixel_size,
+ uint32_t sprite_width,
+ uint32_t sprite_height,
+ int pixel_size,
bool enabled, bool scaled)
{
struct drm_i915_private *dev_priv = plane->dev->dev_private;
if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+ dev_priv->display.update_sprite_wm(plane, crtc,
+ sprite_width, sprite_height,
pixel_size, enabled, scaled);
}
@@ -3147,6 +3188,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_THRESHOLD;
+ mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+ mask &= dev_priv->pm_rps_events;
+
/* IVB and SNB hard hangs on looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*/
@@ -3250,7 +3294,9 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+ else if (IS_VALLEYVIEW(dev))
vlv_set_rps_idle(dev_priv);
else
gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@ -3348,6 +3394,15 @@ static void gen6_disable_rps(struct drm_device *dev)
gen6_disable_rps_interrupts(dev);
}
+static void cherryview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen8_disable_rps_interrupts(dev);
+}
+
static void valleyview_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3419,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3430,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
WARN_ON(dev_priv->rps.pm_iir);
- snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -3483,15 +3538,23 @@ static void gen8_enable_rps(struct drm_device *dev)
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
I915_WRITE(GEN6_RC_SLEEP, 0);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+ else
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
/* 3: Enable RC6 */
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
intel_print_rc6_info(dev, rc6_mask);
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
+ if (IS_BROADWELL(dev))
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ rc6_mask);
+ else
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
/* 4: Program defaults and thresholds for RPS */
I915_WRITE(GEN6_RPNSWREQ,
@@ -3727,7 +3790,57 @@ void gen6_update_ring_freq(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
-int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp0;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp0;
+}
+
+static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpe;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+ rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+
+ return rpe;
+}
+
+static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+ rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+
+ return rp1;
+}
+
+static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpn;
+
+ val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+ rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+ return rpn;
+}
+
+static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rp1;
+
+ val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+
+ rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+
+ return rp1;
+}
+
+static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp0;
@@ -3752,7 +3865,7 @@ static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
return rpe;
}
-int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
+static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
@@ -3766,6 +3879,35 @@ static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
dev_priv->vlv_pctx->stolen->start);
}
+
+/* Check that the pcbr address is not empty. */
+static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+{
+ unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+
+ WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+}
+
+static void cherryview_setup_pctx(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long pctx_paddr, paddr;
+ struct i915_gtt *gtt = &dev_priv->gtt;
+ u32 pcbr;
+ int pctx_size = 32*1024;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ pcbr = I915_READ(VLV_PCBR);
+ if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+ paddr = (dev_priv->mm.stolen_base +
+ (gtt->stolen_size - pctx_size));
+
+ pctx_paddr = (paddr & (~4095));
+ I915_WRITE(VLV_PCBR, pctx_paddr);
+ }
+}
+
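The computation places a 32 KiB power context at the very top of stolen memory,
4 KiB-aligned. For example (hypothetical layout), with stolen_base = 0x7b000000
and stolen_size = 64 MiB, paddr = 0x7b000000 + 0x4000000 - 0x8000 = 0x7eff8000,
which is already page-aligned and is written to VLV_PCBR as-is.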
static void valleyview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3840,6 +3982,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
dev_priv->rps.efficient_freq);
+ dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
@@ -3855,11 +4002,142 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
+static void cherryview_init_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ cherryview_setup_pctx(dev);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+
+ dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+ dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+ DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+ dev_priv->rps.max_freq);
+
+ dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+ DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+ dev_priv->rps.rp1_freq);
+
+ dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+ DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+ dev_priv->rps.min_freq);
+
+ /* Preserve min/max settings in case of re-init */
+ if (dev_priv->rps.max_freq_softlimit == 0)
+ dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+
+ if (dev_priv->rps.min_freq_softlimit == 0)
+ dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+
+ mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
valleyview_cleanup_pctx(dev);
}
+static void cherryview_enable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *ring;
+ u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+ int i;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+ gtfifodbg = I915_READ(GTFIFODBG);
+ if (gtfifodbg) {
+ DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+ gtfifodbg);
+ I915_WRITE(GTFIFODBG, gtfifodbg);
+ }
+
+ cherryview_check_pctx(dev_priv);
+
+ /* 1a & 1b: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* 2a: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+
+ for_each_ring(ring, dev_priv, i)
+ I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /* allows RC6 residency counter to work */
+ I915_WRITE(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
+
+ /* For now we assume BIOS is allocating and populating the PCBR */
+ pcbr = I915_READ(VLV_PCBR);
+
+ DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+
+ /* 3: Enable RC6 */
+ if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+ (pcbr >> VLV_PCBR_ADDR_SHIFT))
+ rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+
+ I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+
+	/* 4: Program defaults and thresholds for RPS */
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+ I915_WRITE(GEN6_RP_UP_EI, 66000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+
+ /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+ I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+ I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+
+ /* 5: Enable RPS */
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
+ GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_AVG);
+
+ val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+
+ DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+ DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+
+ dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+ DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+ dev_priv->rps.cur_freq);
+
+ DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+ vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+ dev_priv->rps.efficient_freq);
+
+ valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+
+ gen8_enable_rps_interrupts(dev);
+
+ gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+}
+
static void valleyview_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3886,6 +4164,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
@@ -3906,9 +4185,11 @@ static void valleyview_enable_rps(struct drm_device *dev)
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
+
if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -4666,33 +4947,60 @@ void intel_init_gt_powersave(struct drm_device *dev)
{
i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_init_gt_powersave(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_init_gt_powersave(dev);
}
void intel_cleanup_gt_powersave(struct drm_device *dev)
{
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ return;
+ else if (IS_VALLEYVIEW(dev))
valleyview_cleanup_gt_powersave(dev);
}
+/**
+ * intel_suspend_gt_powersave - suspend PM work and helper threads
+ * @dev: drm device
+ *
+ * We don't want to disable RC6 or other features here, we just want
+ * to make sure any work we've queued has finished and won't bother
+ * us while we're suspended.
+ */
+void intel_suspend_gt_powersave(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Interrupts should be disabled already to avoid re-arming. */
+ WARN_ON(intel_irqs_enabled(dev_priv));
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ cancel_work_sync(&dev_priv->rps.work);
+
+ /* Force GPU to min freq during suspend */
+ gen6_rps_idle(dev_priv);
+}
+
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Interrupts should be disabled already to avoid re-arming. */
- WARN_ON(dev->irq_enabled);
+ WARN_ON(intel_irqs_enabled(dev_priv));
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
- if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
- intel_runtime_pm_put(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ intel_suspend_gt_powersave(dev);
- cancel_work_sync(&dev_priv->rps.work);
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev))
+ if (IS_CHERRYVIEW(dev))
+ cherryview_disable_rps(dev);
+ else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
else
gen6_disable_rps(dev);
@@ -4710,7 +5018,9 @@ static void intel_gen6_powersave_work(struct work_struct *work)
mutex_lock(&dev_priv->rps.hw_lock);
- if (IS_VALLEYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ cherryview_enable_rps(dev);
+ } else if (IS_VALLEYVIEW(dev)) {
valleyview_enable_rps(dev);
} else if (IS_BROADWELL(dev)) {
gen8_enable_rps(dev);
@@ -4735,7 +5045,7 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
mutex_unlock(&dev->struct_mutex);
- } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+ } else if (INTEL_INFO(dev)->gen >= 6) {
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
@@ -5108,7 +5418,7 @@ static void gen8_init_clock_gating(struct drm_device *dev)
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
I915_WRITE(_3D_CHICKEN3,
- _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+ _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
I915_WRITE(COMMON_SLICE_CHICKEN2,
_MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@ -5343,10 +5653,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
}
DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
- dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
- DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
- dev_priv->vlv_cdclk_freq);
-
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull:vlv */
@@ -5421,6 +5727,35 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ switch ((val >> 2) & 0x7) {
+ case 0:
+ case 1:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 2:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 3:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+ dev_priv->mem_freq = 2000;
+ break;
+ case 4:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+ dev_priv->mem_freq = 1600;
+ break;
+ case 5:
+ dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+ dev_priv->mem_freq = 1600;
+ break;
+ }
+ DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
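
The switch above decodes CCK fuse bits [4:2] into a CZ clock mode and a DDR
speed. As a cross-check, the same mapping collapses into a small table; the
sketch below is standalone userspace C, and treating the CHV_CZ_CLOCK_FREQ_MODE_*
constants as the plain MHz values their names suggest is an assumption:

#include <stdio.h>

/* Mirror of the fuse decode above: field (val >> 2) & 0x7 -> (CZ MHz, DDR MHz).
 * Values 6 and 7 are not handled by the kernel switch, so they are marked
 * invalid here. */
static const struct { int cz_mhz, ddr_mhz; } chv_cck_fuse[8] = {
	[0] = { 200, 1600 },
	[1] = { 200, 1600 },
	[2] = { 267, 1600 },
	[3] = { 333, 2000 },
	[4] = { 320, 1600 },
	[5] = { 400, 1600 },
	[6] = {  -1,   -1 },	/* not decoded by the driver */
	[7] = {  -1,   -1 },
};

int main(void)
{
	unsigned int field;

	for (field = 0; field < 8; field++)
		printf("fuse field %u -> cz %d MHz, DDR %d MHz\n", field,
		       chv_cck_fuse[field].cz_mhz, chv_cck_fuse[field].ddr_mhz);
	return 0;
}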
@@ -5661,7 +5996,6 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- unsigned long irqflags;
/*
* After we re-enable the power well, if we touch VGA register 0x3d5
@@ -5677,21 +6011,8 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
- if (IS_BROADWELL(dev)) {
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
- dev_priv->de_irq_mask[PIPE_B]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
- ~dev_priv->de_irq_mask[PIPE_B] |
- GEN8_PIPE_VBLANK);
- I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
- dev_priv->de_irq_mask[PIPE_C]);
- I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
- ~dev_priv->de_irq_mask[PIPE_C] |
- GEN8_PIPE_VBLANK);
- POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ if (IS_BROADWELL(dev))
+ gen8_irq_power_well_post_enable(dev_priv);
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@ -5762,34 +6083,13 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
return true;
}
-void __vlv_set_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id, bool enable)
+static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well, bool enable)
{
- struct drm_device *dev = dev_priv->dev;
+ enum punit_power_well power_well_id = power_well->data;
u32 mask;
u32 state;
u32 ctrl;
- enum pipe pipe;
-
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
- if (enable) {
- /*
- * Enable the CRI clock source so we can get at the
- * display and the reference clock for VGA
- * hotplug / manual detection.
- */
- I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
- DPLL_REFA_CLK_ENABLE_VLV |
- DPLL_INTEGRATED_CRI_CLK_VLV);
- udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
- } else {
- for_each_pipe(pipe)
- assert_pll_disabled(dev_priv, pipe);
- /* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
- ~DPIO_CMNRST);
- }
- }
mask = PUNIT_PWRGT_MASK(power_well_id);
state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
@@ -5817,28 +6117,6 @@ void __vlv_set_power_well(struct drm_i915_private *dev_priv,
out:
mutex_unlock(&dev_priv->rps.hw_lock);
-
- /*
- * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
- * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
- * a. GUnit 0x2110 bit[0] set to 1 (def 0)
- * b. The other bits such as sfr settings / modesel may all
- * be set to 0.
- *
- * This should only be done on init and resume from S3 with
- * both PLLs disabled, or we risk losing DPIO and PLL
- * synchronization.
- */
- if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
-}
-
-static void vlv_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
-{
- enum punit_power_well power_well_id = power_well->data;
-
- __vlv_set_power_well(dev_priv, power_well_id, enable);
}
static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -5930,6 +6208,53 @@ static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, false);
}
+static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ /*
+ * Enable the CRI clock source so we can get at the
+ * display and the reference clock for VGA
+ * hotplug / manual detection.
+ */
+ I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+ udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+
+ vlv_set_power_well(dev_priv, power_well, true);
+
+ /*
+ * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+ * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
+ * a. GUnit 0x2110 bit[0] set to 1 (def 0)
+ * b. The other bits such as sfr settings / modesel may all
+ * be set to 0.
+ *
+ * This should only be done on init and resume from S3 with
+ * both PLLs disabled, or we risk losing DPIO and PLL
+ * synchronization.
+ */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
+static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ struct drm_device *dev = dev_priv->dev;
+ enum pipe pipe;
+
+ WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+
+ for_each_pipe(pipe)
+ assert_pll_disabled(dev_priv, pipe);
+
+ /* Assert common reset */
+ I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+
+ vlv_set_power_well(dev_priv, power_well, false);
+}
+
static void check_power_well_state(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -6079,6 +6404,7 @@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
+ BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
@@ -6178,6 +6504,13 @@ static const struct i915_power_well_ops vlv_display_power_well_ops = {
.is_enabled = vlv_power_well_enabled,
};
+static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+ .sync_hw = vlv_power_well_sync_hw,
+ .enable = vlv_dpio_cmn_power_well_enable,
+ .disable = vlv_dpio_cmn_power_well_disable,
+ .is_enabled = vlv_power_well_enabled,
+};
+
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
.sync_hw = vlv_power_well_sync_hw,
.enable = vlv_power_well_enable,
@@ -6238,10 +6571,25 @@ static struct i915_power_well vlv_power_wells[] = {
.name = "dpio-common",
.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
- .ops = &vlv_dpio_power_well_ops,
+ .ops = &vlv_dpio_cmn_power_well_ops,
},
};
+static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+ enum punit_power_well power_well_id)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ if (power_well->data == power_well_id)
+ return power_well;
+ }
+
+ return NULL;
+}
+
#define set_power_wells(power_domains, __power_wells) ({ \
(power_domains)->power_wells = (__power_wells); \
(power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
@@ -6292,11 +6640,50 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *disp2d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+
+ /* nothing to do if common lane is already off */
+ if (!cmn->ops->is_enabled(dev_priv, cmn))
+ return;
+
+ /* If the display might already be active, skip this */
+ if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+ I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ return;
+
+ DRM_DEBUG_KMS("toggling display PHY side reset\n");
+
+ /* cmnlane needs DPLL registers */
+ disp2d->ops->enable(dev_priv, disp2d);
+
+ /*
+ * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+ * Need to assert and de-assert PHY SB reset by gating the
+ * common lane power, then un-gating it.
+ * Simply ungating isn't enough to fully reset the PHY and
+ * get the ports and lanes running.
+ */
+ cmn->ops->disable(dev_priv, cmn);
+}
+
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
power_domains->initializing = true;
+
+ if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ mutex_lock(&power_domains->lock);
+ vlv_cmnlane_wa(dev_priv);
+ mutex_unlock(&power_domains->lock);
+ }
+
/* For now, we need the power well to be always enabled. */
intel_display_set_init_power(dev_priv, true);
intel_power_domains_resume(dev_priv);
@@ -6469,7 +6856,7 @@ void intel_init_pm(struct drm_device *dev)
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
- pineview_disable_cxsr(dev);
+ intel_set_memory_cxsr(dev_priv, false);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
@@ -6552,7 +6939,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
return 0;
}
-int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
int div;
@@ -6574,7 +6961,7 @@ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
-int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
int mul;
@@ -6596,6 +6983,80 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
+static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int div, freq;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ div = 5;
+ break;
+ case 267:
+ div = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ div = 8;
+ break;
+ default:
+ return -1;
+ }
+
+ freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+
+ return freq;
+}
+
+static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int mul, opcode;
+
+ switch (dev_priv->rps.cz_freq) {
+ case 200:
+ mul = 5;
+ break;
+ case 267:
+ mul = 6;
+ break;
+ case 320:
+ case 333:
+ case 400:
+ mul = 8;
+ break;
+ default:
+ return -1;
+ }
+
+ opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
+
+ return opcode;
+}
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_gpu_freq(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_gpu_freq(dev_priv, val);
+
+ return ret;
+}
+
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+{
+ int ret = -1;
+
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ ret = chv_freq_opcode(dev_priv, val);
+ else if (IS_VALLEYVIEW(dev_priv->dev))
+ ret = byt_freq_opcode(dev_priv, val);
+
+ return ret;
+}
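
The two CHV helpers above are pure integer arithmetic, so their behaviour is
easy to verify in isolation. A standalone sketch of the same conversion with a
round-trip check (userspace C; DIV_ROUND_CLOSEST is re-derived here for
positive operands only):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* Divider selected by the CZ clock, exactly as in chv_gpu_freq() above */
static int chv_div(int cz_freq)
{
	switch (cz_freq) {
	case 200: return 5;
	case 267: return 6;
	case 320:
	case 333:
	case 400: return 8;
	default:  return -1;
	}
}

static int gpu_freq(int cz_freq, int opcode)
{
	int div = chv_div(cz_freq);
	return div < 0 ? -1 : DIV_ROUND_CLOSEST(cz_freq * opcode, 2 * div) / 2;
}

static int freq_opcode(int cz_freq, int mhz)
{
	int mul = chv_div(cz_freq);
	return mul < 0 ? -1 : DIV_ROUND_CLOSEST(mhz * 2 * mul, cz_freq) * 2;
}

int main(void)
{
	int op;

	/* cz = 320 MHz: e.g. opcode 42 -> 320*42/(2*8)/2 = 420 MHz.
	 * Opcodes step by 2, so even opcodes round-trip exactly. */
	for (op = 40; op <= 48; op += 2)
		printf("opcode %d -> %d MHz -> opcode %d\n",
		       op, gpu_freq(320, op), freq_opcode(320, gpu_freq(320, op)));
	return 0;
}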
+
void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6606,5 +7067,5 @@ void intel_pm_setup(struct drm_device *dev)
intel_gen6_powersave_work);
dev_priv->pm.suspended = false;
- dev_priv->pm.irqs_disabled = false;
+ dev_priv->pm._irqs_disabled = false;
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index a5e783a9928a..fd4f66231d30 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -28,7 +28,6 @@
struct intel_renderstate_rodata {
const u32 *reloc;
- const u32 reloc_items;
const u32 *batch;
const u32 batch_items;
};
@@ -40,7 +39,6 @@ extern const struct intel_renderstate_rodata gen8_null_state;
#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
.reloc = gen ## _g ## _null_state_relocs, \
- .reloc_items = sizeof(gen ## _g ## _null_state_relocs)/4, \
.batch = gen ## _g ## _null_state_batch, \
.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
}
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
index 740538ad0977..56c1429d8a60 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen6.c
@@ -6,6 +6,7 @@ static const u32 gen6_null_state_relocs[] = {
0x0000002c,
0x000001e0,
0x000001e4,
+ -1,
};
static const u32 gen6_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
index 6fa7ff2a1298..419e35a7b0ff 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen7.c
@@ -5,6 +5,7 @@ static const u32 gen7_null_state_relocs[] = {
0x00000010,
0x00000018,
0x000001ec,
+ -1,
};
static const u32 gen7_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 5c875615d42a..75ef1b5de45c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -5,6 +5,7 @@ static const u32 gen8_null_state_relocs[] = {
0x00000050,
0x00000060,
0x000003ec,
+ -1,
};
static const u32 gen8_null_state_batch[] = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 279488addf3f..b3d8f766fa7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,9 +48,8 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}
-static inline int ring_space(struct intel_engine_cs *ring)
+static inline int ring_space(struct intel_ringbuffer *ringbuf)
{
- struct intel_ringbuffer *ringbuf = ring->buffer;
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}
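
ring_space() now takes the ringbuffer directly, which is the only state the
free-space computation needs. The calculation itself is classic circular-buffer
arithmetic: a minimal sketch, assuming the driver keeps a small reserved gap so
that head == tail can only ever mean "empty" (the gap constant below is an
illustrative placeholder, not the driver's actual value):

#include <stdio.h>

#define RING_FREE_SPACE 64	/* assumed reserved gap, illustrative */

static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_SPACE);

	if (space < 0)
		space += size;	/* tail has wrapped past head */
	return space;
}

int main(void)
{
	/* 4 KiB ring, consumer (head) at 0x100, producer (tail) at 0xf00 */
	printf("free space: %d bytes\n", ring_space(0x100, 0xf00, 4096));
	return 0;
}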
@@ -545,7 +544,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
else {
ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
ringbuf->last_retired_head = -1;
}
@@ -604,6 +603,8 @@ static int init_render_ring(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
+ if (ret)
+ return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
@@ -658,6 +659,13 @@ static int init_render_ring(struct intel_engine_cs *ring)
static void render_ring_cleanup(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->semaphore_obj) {
+ i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
+ drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
+ dev_priv->semaphore_obj = NULL;
+ }
if (ring->scratch.obj == NULL)
return;
@@ -671,29 +679,96 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
ring->scratch.obj = NULL;
}
+static int gen8_rcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 8
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
+ intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_FLUSH_ENABLE);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset));
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, 0);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
+static int gen8_xcs_signal(struct intel_engine_cs *signaller,
+ unsigned int num_dwords)
+{
+#define MBOX_UPDATE_DWORDS 6
+ struct drm_device *dev = signaller->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_engine_cs *waiter;
+ int i, ret, num_rings;
+
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
+#undef MBOX_UPDATE_DWORDS
+
+ ret = intel_ring_begin(signaller, num_dwords);
+ if (ret)
+ return ret;
+
+ for_each_ring(waiter, dev_priv, i) {
+ u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
+ if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
+ continue;
+
+ intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
+ MI_FLUSH_DW_OP_STOREDW);
+ intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
+ MI_FLUSH_DW_USE_GTT);
+ intel_ring_emit(signaller, upper_32_bits(gtt_offset));
+ intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
+ intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->id));
+ intel_ring_emit(signaller, 0);
+ }
+
+ return 0;
+}
+
static int gen6_signal(struct intel_engine_cs *signaller,
unsigned int num_dwords)
{
struct drm_device *dev = signaller->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *useless;
- int i, ret;
+ int i, ret, num_rings;
- /* NB: In order to be able to do semaphore MBOX updates for varying
- * number of rings, it's easiest if we round up each individual update
- * to a multiple of 2 (since ring updates must always be a multiple of
- * 2) even though the actual update only requires 3 dwords.
- */
-#define MBOX_UPDATE_DWORDS 4
- if (i915_semaphore_is_enabled(dev))
- num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
- else
- return intel_ring_begin(signaller, num_dwords);
+#define MBOX_UPDATE_DWORDS 3
+ num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+ num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
+#undef MBOX_UPDATE_DWORDS
ret = intel_ring_begin(signaller, num_dwords);
if (ret)
return ret;
-#undef MBOX_UPDATE_DWORDS
for_each_ring(useless, dev_priv, i) {
u32 mbox_reg = signaller->semaphore.mbox.signal[i];
@@ -701,15 +776,13 @@ static int gen6_signal(struct intel_engine_cs *signaller,
intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(signaller, mbox_reg);
intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
- intel_ring_emit(signaller, MI_NOOP);
- } else {
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
- intel_ring_emit(signaller, MI_NOOP);
}
}
+ /* If num_dwords was rounded, make sure the tail pointer is correct */
+ if (num_rings % 2 == 0)
+ intel_ring_emit(signaller, MI_NOOP);
+
return 0;
}
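
The dword accounting in gen6_signal() is easy to get wrong: each mailbox update
is 3 dwords, ring emission must stay qword-aligned, so the reservation is
rounded up to even and one MI_NOOP is emitted whenever the rounding added a
slot. That happens exactly when num_rings is even, since (num_rings - 1) * 3 is
then odd. A standalone check of that parity argument:

#include <stdio.h>

#define MBOX_UPDATE_DWORDS 3	/* LRI + mailbox register + seqno */

static int round_up(int x, int to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	int num_rings;

	for (num_rings = 2; num_rings <= 5; num_rings++) {
		int emitted  = (num_rings - 1) * MBOX_UPDATE_DWORDS;
		int reserved = round_up(emitted, 2);

		/* Padding is needed exactly when the reservation was rounded,
		 * i.e. when num_rings is even. */
		printf("%d rings: emit %2d dwords, reserve %2d, pad %s\n",
		       num_rings, emitted, reserved,
		       reserved != emitted ? "MI_NOOP" : "none");
	}
	return 0;
}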
@@ -727,7 +800,11 @@ gen6_add_request(struct intel_engine_cs *ring)
{
int ret;
- ret = ring->semaphore.signal(ring, 4);
+ if (ring->semaphore.signal)
+ ret = ring->semaphore.signal(ring, 4);
+ else
+ ret = intel_ring_begin(ring, 4);
+
if (ret)
return ret;
@@ -754,6 +831,32 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
* @signaller - ring which has, or will signal
* @seqno - seqno which the waiter will block on
*/
+
+static int
+gen8_ring_sync(struct intel_engine_cs *waiter,
+ struct intel_engine_cs *signaller,
+ u32 seqno)
+{
+ struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+ int ret;
+
+ ret = intel_ring_begin(waiter, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD);
+ intel_ring_emit(waiter, seqno);
+ intel_ring_emit(waiter,
+ lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_emit(waiter,
+ upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
+ intel_ring_advance(waiter);
+ return 0;
+}
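
The MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD combination makes the waiter
poll until the semaphore dword is greater than or equal to the awaited seqno,
so a signaller that has already advanced past the awaited value still releases
the waiter. A rough CPU-side model of that comparison semantics (an
illustration only, not the hardware mechanism, and ignoring seqno wraparound):

#include <stdint.h>
#include <stdio.h>

/* Models the release condition of MI_SEMAPHORE_WAIT with SAD_GTE_SDD:
 * released once *addr >= seqno. */
static int semaphore_released(const uint32_t *addr, uint32_t seqno)
{
	return *addr >= seqno;
}

int main(void)
{
	uint32_t sema = 7;	/* the signaller already wrote seqno 7 */

	printf("wait for 5: %s\n", semaphore_released(&sema, 5) ? "released" : "blocked");
	printf("wait for 9: %s\n", semaphore_released(&sema, 9) ? "released" : "blocked");
	return 0;
}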
+
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
struct intel_engine_cs *signaller,
@@ -901,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0)
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
@@ -916,7 +1019,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0)
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1109,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
GT_PARITY_ERROR(dev)));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1129,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
else
I915_WRITE_IMR(ring, ~0);
- ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+ gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1147,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1167,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
- snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+ gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -1329,6 +1432,7 @@ static int init_status_page(struct intel_engine_cs *ring)
struct drm_i915_gem_object *obj;
if ((obj = ring->status_page.obj) == NULL) {
+ unsigned flags;
int ret;
obj = i915_gem_alloc_object(ring->dev, 4096);
@@ -1341,7 +1445,20 @@ static int init_status_page(struct intel_engine_cs *ring)
if (ret)
goto err_unref;
- ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+ flags = 0;
+ if (!HAS_LLC(ring->dev))
+ /* On g33, we cannot place HWS above 256MiB, so
+ * restrict its pinning to the low mappable arena.
+ * Though this restriction is not documented for
+ * gen4, gen5, or byt, they also behave similarly
+ * and hang if the HWS is placed at the top of the
+ * GTT. To generalise, it appears that all !llc
+ * platforms have issues with us placing the HWS
+ * above the mappable region (even though we never
+ * actually map it).
+ */
+ flags |= PIN_MAPPABLE;
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
if (ret) {
err_unref:
drm_gem_object_unreference(&obj->base);
@@ -1378,15 +1495,25 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}
-static int allocate_ring_buffer(struct intel_engine_cs *ring)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+{
+ if (!ringbuf->obj)
+ return;
+
+ iounmap(ringbuf->virtual_start);
+ i915_gem_object_ggtt_unpin(ringbuf->obj);
+ drm_gem_object_unreference(&ringbuf->obj->base);
+ ringbuf->obj = NULL;
+}
+
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+ struct intel_ringbuffer *ringbuf)
{
- struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ringbuffer *ringbuf = ring->buffer;
struct drm_i915_gem_object *obj;
int ret;
- if (intel_ring_initialized(ring))
+ if (ringbuf->obj)
return 0;
obj = NULL;
@@ -1397,6 +1524,9 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
if (obj == NULL)
return -ENOMEM;
+ /* mark ring buffers as read-only from GPU side by default */
+ obj->gt_ro = 1;
+
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
goto err_unref;
@@ -1455,7 +1585,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto error;
}
- ret = allocate_ring_buffer(ring);
+ ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
goto error;
@@ -1496,11 +1626,7 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
intel_stop_ring_buffer(ring);
WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
- iounmap(ringbuf->virtual_start);
-
- i915_gem_object_ggtt_unpin(ringbuf->obj);
- drm_gem_object_unreference(&ringbuf->obj->base);
- ringbuf->obj = NULL;
+ intel_destroy_ringbuffer_obj(ringbuf);
ring->preallocated_lazy_request = NULL;
ring->outstanding_lazy_seqno = 0;
@@ -1526,7 +1652,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n)
return 0;
}
@@ -1549,7 +1675,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
ringbuf->head = ringbuf->last_retired_head;
ringbuf->last_retired_head = -1;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1578,7 +1704,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
trace_i915_ring_wait_begin(ring);
do {
ringbuf->head = I915_READ_HEAD(ring);
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
if (ringbuf->space >= n) {
ret = 0;
break;
@@ -1630,7 +1756,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
iowrite32(MI_NOOP, virt++);
ringbuf->tail = 0;
- ringbuf->space = ring_space(ring);
+ ringbuf->space = ring_space(ringbuf);
return 0;
}
@@ -1746,14 +1872,15 @@ int intel_ring_cacheline_align(struct intel_engine_cs *ring)
void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
BUG_ON(ring->outstanding_lazy_seqno);
- if (INTEL_INFO(ring->dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
- if (HAS_VEBOX(ring->dev))
+ if (HAS_VEBOX(dev))
I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
@@ -1941,45 +2068,74 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+ struct drm_i915_gem_object *obj;
+ int ret;
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
- if (INTEL_INFO(dev)->gen >= 6) {
+ if (INTEL_INFO(dev)->gen >= 8) {
+ if (i915_semaphore_is_enabled(dev)) {
+ obj = i915_gem_alloc_object(dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else {
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+ if (ret != 0) {
+ drm_gem_object_unreference(&obj->base);
+ DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
+ i915.semaphores = 0;
+ } else
+ dev_priv->semaphore_obj = obj;
+ }
+ }
+ ring->add_request = gen6_add_request;
+ ring->flush = gen8_render_ring_flush;
+ ring->irq_get = gen8_ring_get_irq;
+ ring->irq_put = gen8_ring_put_irq;
+ ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
+ ring->get_seqno = gen6_ring_get_seqno;
+ ring->set_seqno = ring_set_seqno;
+ if (i915_semaphore_is_enabled(dev)) {
+ WARN_ON(!dev_priv->semaphore_obj);
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_rcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
+ } else if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
- if (INTEL_INFO(dev)->gen >= 8) {
- ring->flush = gen8_render_ring_flush;
- ring->irq_get = gen8_ring_get_irq;
- ring->irq_put = gen8_ring_put_irq;
- } else {
- ring->irq_get = gen6_ring_get_irq;
- ring->irq_put = gen6_ring_put_irq;
- }
+ ring->irq_get = gen6_ring_get_irq;
+ ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between RCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and RCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ /*
+ * Semaphores are only used on pre-gen8 platforms, and
+ * there is no VCS2 ring on pre-gen8, so the semaphore
+ * between RCS and VCS2 is initialized as INVALID. Gen8
+ * initializes the semaphore between VCS2 and RCS later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
@@ -2007,6 +2163,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
+
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (IS_GEN8(dev))
@@ -2024,9 +2181,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
- struct drm_i915_gem_object *obj;
- int ret;
-
obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
@@ -2157,31 +2311,32 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform.
- * And there is no VCS2 ring on the pre-gen8 platform. So the
- * semaphore between VCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between VCS2 and VCS later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
@@ -2218,7 +2373,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
return -EINVAL;
}
- ring->name = "bds2_ring";
+ ring->name = "bsd2 ring";
ring->id = VCS2;
ring->write_tail = ring_write_tail;
@@ -2233,25 +2388,11 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on the pre-gen8. And there
- * is no bsd2 ring on the pre-gen8. So now the semaphore_register
- * between VCS2 and other ring is initialized as invalid.
- * Gen8 will initialize the sema between VCS2 and other ring later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
-
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2277,30 +2418,38 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.sync_to = gen6_ring_sync;
+ /*
+ * Semaphores are only used on pre-gen8 platforms, and
+ * there is no VCS2 ring on pre-gen8, so the semaphore
+ * between BCS and VCS2 is initialized as INVALID. Gen8
+ * initializes the semaphore between BCS and VCS2 later.
+ */
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- /*
- * The current semaphore is only applied on pre-gen8 platform. And
- * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
- * between BCS and VCS2 is initialized as INVALID.
- * Gen8 will initialize the sema between BCS and VCS2 later.
- */
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
@@ -2327,24 +2476,31 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->irq_get = gen8_ring_get_irq;
ring->irq_put = gen8_ring_put_irq;
ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen8_ring_sync;
+ ring->semaphore.signal = gen8_xcs_signal;
+ GEN8_RING_SEMAPHORE_INIT;
+ }
} else {
ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+ if (i915_semaphore_is_enabled(dev)) {
+ ring->semaphore.sync_to = gen6_ring_sync;
+ ring->semaphore.signal = gen6_signal;
+ ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+ ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+ ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+ ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+ ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+ ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+ ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+ ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+ ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+ }
}
- ring->semaphore.sync_to = gen6_ring_sync;
- ring->semaphore.signal = gen6_signal;
- ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
- ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
- ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
- ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
- ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
- ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
- ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
- ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
- ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index e72017bdcd7f..ed5941078f92 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -40,6 +40,32 @@ struct intel_hw_status_page {
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
+/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
+ * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
+ */
+#define i915_semaphore_seqno_size sizeof(uint64_t)
+#define GEN8_SIGNAL_OFFSET(__ring, to) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (to)))
+
+#define GEN8_WAIT_OFFSET(__ring, from) \
+ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
+ ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \
+ (i915_semaphore_seqno_size * (__ring)->id))
+
+#define GEN8_RING_SEMAPHORE_INIT do { \
+ if (!dev_priv->semaphore_obj) { \
+ break; \
+ } \
+ ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \
+ ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \
+ ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \
+ ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \
+ ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \
+ ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \
+ } while (0)
+
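
The two offset macros linearize a (signaller, waiter) pair into the semaphore
bo: each slot is one qword and each ring owns a row of I915_NUM_RINGS slots. A
standalone sketch of the same math with the ggtt base omitted; the printed
offsets can be cross-checked against the signal/wait table in the struct
comment further down:

#include <stdio.h>

#define NUM_RINGS  5
#define SEQNO_SIZE 8	/* qword slots, per the comment above */

static const char *name[NUM_RINGS] = { "RCS", "VCS", "BCS", "VECS", "VCS2" };

/* GEN8_SIGNAL_OFFSET minus the ggtt base: signaller's row, waiter's slot */
static int signal_offset(int from, int to)
{
	return from * NUM_RINGS * SEQNO_SIZE + SEQNO_SIZE * to;
}

int main(void)
{
	int from, to;

	for (from = 0; from < NUM_RINGS; from++)
		for (to = 0; to < NUM_RINGS; to++)
			printf("%-4s signals %-4s at 0x%02x%s\n",
			       name[from], name[to], signal_offset(from, to),
			       from == to ? " (own slot, unused)" : "");
	return 0;
}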
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
HANGCHECK_WAIT,
@@ -127,15 +153,55 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_engine_cs *ring);
+ /* GEN8 signal/wait table - never trust comments!
+ * signal to signal to signal to signal to signal to
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
+ * i.e. the transpose of g(x, y)
+ *
+ * sync from sync from sync from sync from sync from
+ * RCS VCS BCS VECS VCS2
+ * --------------------------------------------------------------------
+ * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
+ * |-------------------------------------------------------------------
+ * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
+ * |-------------------------------------------------------------------
+ * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
+ * |-------------------------------------------------------------------
+ * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
+ * |-------------------------------------------------------------------
+ * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
+ * |-------------------------------------------------------------------
+ *
+ * Generalization:
+ * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
+ * i.e. the transpose of f(x, y)
+ */
struct {
u32 sync_seqno[I915_NUM_RINGS-1];
- struct {
- /* our mbox written by others */
- u32 wait[I915_NUM_RINGS];
- /* mboxes this ring signals to */
- u32 signal[I915_NUM_RINGS];
- } mbox;
+ union {
+ struct {
+ /* our mbox written by others */
+ u32 wait[I915_NUM_RINGS];
+ /* mboxes this ring signals to */
+ u32 signal[I915_NUM_RINGS];
+ } mbox;
+ u64 signal_ggtt[I915_NUM_RINGS];
+ };
/* AKA wait() */
int (*sync_to)(struct intel_engine_cs *ring,
@@ -238,9 +304,11 @@ intel_ring_sync_index(struct intel_engine_cs *ring,
int idx;
/*
- * cs -> 0 = vcs, 1 = bcs
- * vcs -> 0 = bcs, 1 = cs,
- * bcs -> 0 = cs, 1 = vcs.
+ * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
+ * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
+ * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
+ * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
+ * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
*/
idx = (other - ring) - 1;
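
Since the engines live in one fixed array, (other - ring) is simply the id
difference; the wraparound that follows (outside this hunk, assumed here) folds
negative differences back into 0..I915_NUM_RINGS-2. The equivalent id
arithmetic, checked against the mapping in the comment above:

#include <stdio.h>

#define NUM_RINGS 5

static const char *name[NUM_RINGS] = { "rcs", "vcs", "bcs", "vecs", "vcs2" };

static int sync_index(int ring, int other)
{
	int idx = (other - ring) - 1;	/* pointer diff == id diff */

	if (idx < 0)
		idx += NUM_RINGS;	/* assumed wraparound step */
	return idx;
}

int main(void)
{
	int r, o;

	for (r = 0; r < NUM_RINGS; r++) {
		printf("%-4s ->", name[r]);
		for (o = 0; o < NUM_RINGS; o++)
			if (o != r)
				printf(" %d = %s", sync_index(r, o), name[o]);
		printf("\n");
	}
	return 0;
}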
@@ -318,9 +386,9 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
-static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
+static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
- return ring->buffer->tail;
+ return ringbuf->tail;
}
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 20375cc7f82d..9350edd6728d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2433,7 +2433,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
connector->base.unregister = intel_sdvo_connector_unregister;
intel_connector_attach_encoder(&connector->base, &encoder->base);
- ret = drm_sysfs_connector_add(drm_connector);
+ ret = drm_connector_register(drm_connector);
if (ret < 0)
goto err1;
@@ -2446,7 +2446,7 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
return 0;
err2:
- drm_sysfs_connector_remove(drm_connector);
+ drm_connector_unregister(drm_connector);
err1:
drm_connector_cleanup(drm_connector);
@@ -2559,7 +2559,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2638,7 +2638,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
return true;
err:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
return false;
}
@@ -2711,7 +2711,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
intel_sdvo_destroy(connector);
}
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9a17b4e92ef4..168c6652cda1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -218,7 +218,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
sprctl |= SP_ENABLE;
- intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(dplane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -283,7 +284,7 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
if (atomic_update)
intel_pipe_update_end(intel_crtc, start_vbl_count);
- intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static int
@@ -406,7 +407,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
+ true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -486,7 +488,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static int
@@ -606,7 +608,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
- intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
+ pixel_size, true,
src_w != crtc_w || src_h != crtc_h);
/* Sizes are 0 based */
@@ -681,7 +684,7 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
*/
intel_wait_for_vblank(dev, pipe);
- intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, 0, false, false);
}
static void
@@ -819,6 +822,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_plane *intel_plane = to_intel_plane(plane);
+ enum pipe pipe = intel_crtc->pipe;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct drm_i915_gem_object *old_obj = intel_plane->obj;
@@ -1006,6 +1010,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
*/
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ i915_gem_track_fb(old_obj, obj,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
if (ret)
@@ -1039,6 +1045,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
else
intel_plane->disable_plane(plane, crtc);
+ intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
+
if (!primary_was_enabled && primary_enabled)
intel_post_enable_primary(crtc);
}
@@ -1068,6 +1076,7 @@ intel_disable_plane(struct drm_plane *plane)
struct drm_device *dev = plane->dev;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
+ enum pipe pipe;
if (!plane->fb)
return 0;
@@ -1076,6 +1085,7 @@ intel_disable_plane(struct drm_plane *plane)
return -EINVAL;
intel_crtc = to_intel_crtc(plane->crtc);
+ pipe = intel_crtc->pipe;
if (intel_crtc->active) {
bool primary_was_enabled = intel_crtc->primary_enabled;
@@ -1094,6 +1104,8 @@ intel_disable_plane(struct drm_plane *plane)
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(intel_plane->obj);
+ i915_gem_track_fb(intel_plane->obj, NULL,
+ INTEL_FRONTBUFFER_SPRITE(pipe));
mutex_unlock(&dev->struct_mutex);
intel_plane->obj = NULL;
@@ -1114,7 +1126,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1128,13 +1139,12 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, set->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
ret = intel_plane->update_colorkey(plane, set);
@@ -1147,7 +1157,6 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
- struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
@@ -1157,13 +1166,12 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
- obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
- if (!obj) {
+ plane = drm_plane_find(dev, get->plane_id);
+ if (!plane) {
ret = -ENOENT;
goto out_unlock;
}
- plane = obj_to_plane(obj);
intel_plane = to_intel_plane(plane);
intel_plane->get_colorkey(plane, get);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 67c6c9a2eb1c..e211eef4b7e4 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1680,5 +1680,5 @@ intel_tv_init(struct drm_device *dev)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_bottom_margin_property,
intel_tv->margin[TV_MARGIN_BOTTOM]);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f6fef7ac069..e81bc3bdc533 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -231,8 +231,8 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
}
/* WaRsForcewakeWaitTC0:vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ __gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -250,9 +250,10 @@ static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+ if (!IS_CHERRYVIEW(dev_priv->dev))
+ gen6_gt_check_fifodbg(dev_priv);
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
@@ -315,7 +316,7 @@ static void gen6_force_wake_timer(unsigned long arg)
intel_runtime_pm_put(dev_priv);
}
-static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
+void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -357,16 +358,12 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
dev_priv->uncore.fifo_count =
__raw_i915_read32(dev_priv, GTFIFOCTL) &
GT_FIFO_FREE_ENTRIES_MASK;
- } else {
- dev_priv->uncore.forcewake_count = 0;
- dev_priv->uncore.fw_rendercount = 0;
- dev_priv->uncore.fw_mediacount = 0;
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-void intel_uncore_early_sanitize(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -389,7 +386,7 @@ void intel_uncore_early_sanitize(struct drm_device *dev)
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
- intel_uncore_forcewake_reset(dev, false);
+ intel_uncore_forcewake_reset(dev, restore_forcewake);
}
void intel_uncore_sanitize(struct drm_device *dev)
@@ -469,16 +466,43 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((reg) < 0x40000 && (reg) != FORCEWAKE)
-#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
- (((reg) >= 0x2000 && (reg) < 0x4000) ||\
- ((reg) >= 0x5000 && (reg) < 0x8000) ||\
- ((reg) >= 0xB000 && (reg) < 0x12000) ||\
- ((reg) >= 0x2E000 && (reg) < 0x30000))
+#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
-#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)\
- (((reg) >= 0x12000 && (reg) < 0x14000) ||\
- ((reg) >= 0x22000 && (reg) < 0x24000) ||\
- ((reg) >= 0x30000 && (reg) < 0x40000))
+#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0xB000, 0x12000) || \
+ REG_RANGE((reg), 0x2E000, 0x30000))
+
+#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x22000, 0x24000) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x2000, 0x4000) || \
+ REG_RANGE((reg), 0x5000, 0x8000) || \
+ REG_RANGE((reg), 0x8300, 0x8500) || \
+ REG_RANGE((reg), 0xB000, 0xC000) || \
+ REG_RANGE((reg), 0xE000, 0xE800))
+
+#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x8800, 0x8900) || \
+ REG_RANGE((reg), 0xD000, 0xD800) || \
+ REG_RANGE((reg), 0x12000, 0x14000) || \
+ REG_RANGE((reg), 0x1A000, 0x1C000) || \
+ REG_RANGE((reg), 0x1E800, 0x1EA00) || \
+ REG_RANGE((reg), 0x30000, 0x40000))
+
+#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
+ (REG_RANGE((reg), 0x4000, 0x5000) || \
+ REG_RANGE((reg), 0x8000, 0x8300) || \
+ REG_RANGE((reg), 0x8500, 0x8600) || \
+ REG_RANGE((reg), 0x9000, 0xB000) || \
+ REG_RANGE((reg), 0xC000, 0xC800) || \
+ REG_RANGE((reg), 0xF000, 0x10000) || \
+ REG_RANGE((reg), 0x14000, 0x14400) || \
+ REG_RANGE((reg), 0x22000, 0x24000))
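
Each CHV MMIO access should wake only the wells backing the register's range,
and the three macros above partition the register space into render-only,
media-only and common intervals. The same partition, collapsed into one
table-driven classifier (standalone sketch; the FW_* flag values are
illustrative, not the driver's FORCEWAKE_* constants):

#include <stdio.h>

#define FW_RENDER 0x1
#define FW_MEDIA  0x2

struct range { unsigned start, end, fw; };

/* Same intervals as the FORCEWAKE_CHV_*_RANGE_OFFSET macros above */
static const struct range chv_fw_ranges[] = {
	{ 0x2000,  0x4000,  FW_RENDER },
	{ 0x4000,  0x5000,  FW_RENDER | FW_MEDIA },
	{ 0x5000,  0x8000,  FW_RENDER },
	{ 0x8000,  0x8300,  FW_RENDER | FW_MEDIA },
	{ 0x8300,  0x8500,  FW_RENDER },
	{ 0x8500,  0x8600,  FW_RENDER | FW_MEDIA },
	{ 0x8800,  0x8900,  FW_MEDIA },
	{ 0x9000,  0xB000,  FW_RENDER | FW_MEDIA },
	{ 0xB000,  0xC000,  FW_RENDER },
	{ 0xC000,  0xC800,  FW_RENDER | FW_MEDIA },
	{ 0xD000,  0xD800,  FW_MEDIA },
	{ 0xE000,  0xE800,  FW_RENDER },
	{ 0xF000,  0x10000, FW_RENDER | FW_MEDIA },
	{ 0x12000, 0x14000, FW_MEDIA },
	{ 0x14000, 0x14400, FW_RENDER | FW_MEDIA },
	{ 0x1A000, 0x1C000, FW_MEDIA },
	{ 0x1E800, 0x1EA00, FW_MEDIA },
	{ 0x22000, 0x24000, FW_RENDER | FW_MEDIA },
	{ 0x30000, 0x40000, FW_MEDIA },
};

static unsigned chv_needed_fw(unsigned reg)
{
	unsigned i;

	for (i = 0; i < sizeof(chv_fw_ranges) / sizeof(chv_fw_ranges[0]); i++)
		if (reg >= chv_fw_ranges[i].start && reg < chv_fw_ranges[i].end)
			return chv_fw_ranges[i].fw;
	return 0;	/* outside all ranges: no forcewake needed */
}

int main(void)
{
	printf("0x02100 -> fw 0x%x\n", chv_needed_fw(0x2100));	/* render */
	printf("0x12080 -> fw 0x%x\n", chv_needed_fw(0x12080));	/* media */
	printf("0x09400 -> fw 0x%x\n", chv_needed_fw(0x9400));	/* both */
	return 0;
}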
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -490,20 +514,30 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
}
static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
+ bool before)
{
+ const char *op = read ? "reading" : "writing to";
+ const char *when = before ? "before" : "after";
+
+ if (!i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unknown unclaimed register before writing to %x\n",
- reg);
+ WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
+ when, op, reg);
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
+ if (i915.mmio_debug)
+ return;
+
if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
- DRM_ERROR("Unclaimed write to %x\n", reg);
+ DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem.");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
}
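
Taken together, the two helpers split the old single check in two: with i915.mmio_debug set, every accessor brackets the raw access with a before/after check, while the cheap detect-only path runs otherwise. A minimal sketch of the bracketing, assuming only the helpers above (both calls are no-ops when i915.mmio_debug is 0):

static u32 debug_checked_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	hsw_unclaimed_reg_debug(dev_priv, reg, true, true);  /* before */
	val = __raw_i915_read32(dev_priv, reg);
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); /* after */
	return val;
}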
@@ -540,6 +574,7 @@ gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_HEADER(x); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
if (dev_priv->uncore.forcewake_count == 0 && \
NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -550,6 +585,7 @@ gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
} else { \
val = __raw_i915_read##x(dev_priv, reg); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
REG_READ_FOOTER; \
}
@@ -573,7 +609,35 @@ vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
REG_READ_FOOTER; \
}
+#define __chv_read(x) \
+static u##x \
+chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+ unsigned fwengine = 0; \
+ REG_READ_HEADER(x); \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_READ_FOOTER; \
+}
+__chv_read(8)
+__chv_read(16)
+__chv_read(32)
+__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
@@ -591,6 +655,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)
+#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
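
The CHV read path above selects which forcewake wells to grab from the register range; as a reading aid (not part of the patch), the selection pulled out of the macro, assuming the existing fw_rendercount/fw_mediacount reference counts, where a nonzero count means some caller already holds that well:

static unsigned chv_read_fw_domains(struct drm_i915_private *dev_priv, u32 reg)
{
	unsigned fwengine = 0;

	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) {
		if (dev_priv->uncore.fw_rendercount == 0)
			fwengine = FORCEWAKE_RENDER;
	} else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) {
		if (dev_priv->uncore.fw_mediacount == 0)
			fwengine = FORCEWAKE_MEDIA;
	} else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) {
		/* common-range registers may need both wells awake */
		if (dev_priv->uncore.fw_rendercount == 0)
			fwengine |= FORCEWAKE_RENDER;
		if (dev_priv->uncore.fw_mediacount == 0)
			fwengine |= FORCEWAKE_MEDIA;
	}

	return fwengine;
}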
@@ -647,12 +712,13 @@ hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
- hsw_unclaimed_reg_clear(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
__raw_i915_write##x(dev_priv, reg, val); \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
- hsw_unclaimed_reg_check(dev_priv, reg); \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
@@ -681,6 +747,7 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
REG_WRITE_HEADER; \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
if (dev_priv->uncore.forcewake_count == 0) \
dev_priv->uncore.funcs.force_wake_get(dev_priv, \
@@ -692,9 +759,43 @@ gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace
} else { \
__raw_i915_write##x(dev_priv, reg, val); \
} \
+ hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
+ hsw_unclaimed_reg_detect(dev_priv); \
REG_WRITE_FOOTER; \
}
+#define __chv_write(x) \
+static void \
+chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+ unsigned fwengine = 0; \
+ bool shadowed = is_gen8_shadowed(dev_priv, reg); \
+ REG_WRITE_HEADER; \
+ if (!shadowed) { \
+ if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine = FORCEWAKE_RENDER; \
+ } else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine = FORCEWAKE_MEDIA; \
+ } else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) { \
+ if (dev_priv->uncore.fw_rendercount == 0) \
+ fwengine |= FORCEWAKE_RENDER; \
+ if (dev_priv->uncore.fw_mediacount == 0) \
+ fwengine |= FORCEWAKE_MEDIA; \
+ } \
+ } \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (fwengine) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
+ REG_WRITE_FOOTER; \
+}
+
+__chv_write(8)
+__chv_write(16)
+__chv_write(32)
+__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
@@ -716,6 +817,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)
+#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
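
On the write side the same range dispatch applies, with one extra wrinkle: registers on the gen8 shadowed list bypass forcewake entirely. A sketch of the combined predicate, assuming the is_gen8_shadowed() helper above:

static bool chv_write_needs_forcewake(struct drm_i915_private *dev_priv,
				      u32 reg)
{
	/* Shadowed registers are handled by hardware save/restore, so
	 * chv_write*() never wakes a well for them. */
	return !is_gen8_shadowed(dev_priv, reg) &&
	       (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) ||
		FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) ||
		FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg));
}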
@@ -731,7 +833,7 @@ void intel_uncore_init(struct drm_device *dev)
setup_timer(&dev_priv->uncore.force_wake_timer,
gen6_force_wake_timer, (unsigned long)dev_priv);
- intel_uncore_early_sanitize(dev);
+ intel_uncore_early_sanitize(dev, false);
if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
@@ -779,14 +881,26 @@ void intel_uncore_init(struct drm_device *dev)
switch (INTEL_INFO(dev)->gen) {
default:
- dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
- dev_priv->uncore.funcs.mmio_writew = gen8_write16;
- dev_priv->uncore.funcs.mmio_writel = gen8_write32;
- dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
- dev_priv->uncore.funcs.mmio_readb = gen6_read8;
- dev_priv->uncore.funcs.mmio_readw = gen6_read16;
- dev_priv->uncore.funcs.mmio_readl = gen6_read32;
- dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ if (IS_CHERRYVIEW(dev)) {
+ dev_priv->uncore.funcs.mmio_writeb = chv_write8;
+ dev_priv->uncore.funcs.mmio_writew = chv_write16;
+ dev_priv->uncore.funcs.mmio_writel = chv_write32;
+ dev_priv->uncore.funcs.mmio_writeq = chv_write64;
+ dev_priv->uncore.funcs.mmio_readb = chv_read8;
+ dev_priv->uncore.funcs.mmio_readw = chv_read16;
+ dev_priv->uncore.funcs.mmio_readl = chv_read32;
+ dev_priv->uncore.funcs.mmio_readq = chv_read64;
+
+ } else {
+ dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
+ dev_priv->uncore.funcs.mmio_writew = gen8_write16;
+ dev_priv->uncore.funcs.mmio_writel = gen8_write32;
+ dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
+ dev_priv->uncore.funcs.mmio_readb = gen6_read8;
+ dev_priv->uncore.funcs.mmio_readw = gen6_read16;
+ dev_priv->uncore.funcs.mmio_readl = gen6_read32;
+ dev_priv->uncore.funcs.mmio_readq = gen6_read64;
+ }
break;
case 7:
case 6:
@@ -912,7 +1026,7 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
if (args->flags || args->pad)
return -EINVAL;
- if (args->ctx_id == DEFAULT_CONTEXT_ID && !capable(CAP_SYS_ADMIN))
+ if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
return -EPERM;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1053,18 +1167,16 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
- switch (INTEL_INFO(dev)->gen) {
- case 8:
- case 7:
- case 6: return gen6_do_reset(dev);
- case 5: return ironlake_do_reset(dev);
- case 4:
- if (IS_G4X(dev))
- return g4x_do_reset(dev);
- else
- return i965_do_reset(dev);
- default: return -ENODEV;
- }
+ if (INTEL_INFO(dev)->gen >= 6)
+ return gen6_do_reset(dev);
+ else if (IS_GEN5(dev))
+ return ironlake_do_reset(dev);
+ else if (IS_G4X(dev))
+ return g4x_do_reset(dev);
+ else if (IS_GEN4(dev))
+ return i965_do_reset(dev);
+ else
+ return -ENODEV;
}
void intel_uncore_check_errors(struct drm_device *dev)
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index cf11ee68a6d9..80de23d9b9c9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -280,7 +280,7 @@ static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
{
int ret;
- ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, NULL);
if (ret) {
if (ret != -ERESTARTSYS && ret != -EBUSY)
DRM_ERROR("reserve failed %p\n", bo);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 13b7dd83faa9..5451dc58eff1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -272,7 +272,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
return 0;
}
-static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
.gamma_set = mga_crtc_fb_gamma_set,
.gamma_get = mga_crtc_fb_gamma_get,
.fb_probe = mgag200fb_create,
@@ -293,9 +293,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
return -ENOMEM;
mdev->mfbdev = mfbdev;
- mfbdev->helper.funcs = &mga_fb_helper_funcs;
spin_lock_init(&mfbdev->dirty_lock);
+ drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
+
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a034ed408252..45f04dea0ac2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1562,19 +1562,9 @@ static struct drm_encoder *mga_connector_best_encoder(struct drm_connector
*connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj =
- drm_mode_object_find(connector->dev, enc_id,
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
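
drm_encoder_find() is the drm_crtc.h helper that the removed lines open-coded; roughly:

static struct drm_encoder *encoder_find_sketch(struct drm_device *dev,
					       uint32_t id)
{
	struct drm_mode_object *mo;

	mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
	return mo ? obj_to_encoder(mo) : NULL;
}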
@@ -1621,7 +1611,7 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
mga_connector->i2c = mgag200_i2c_create(dev);
if (!mga_connector->i2c)
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index f12388967856..c99c50de3226 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,6 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on MSM_IOMMU
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
select DRM_KMS_HELPER
select SHMEM
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 85d615e7d62f..a8a144b38eaa 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -203,6 +203,15 @@ enum a2xx_rb_copy_sample_select {
SAMPLE_0123 = 6,
};
+enum a2xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_MIN_DST_SRC = 2,
+ BLEND_MAX_DST_SRC = 3,
+ BLEND_DST_MINUS_SRC = 4,
+ BLEND_DST_PLUS_SRC_BIAS = 5,
+};
+
enum adreno_mmu_clnt_beh {
BEH_NEVR = 0,
BEH_TRAN_RNG = 1,
@@ -890,6 +899,39 @@ static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
#define REG_A2XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A2XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A2XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+{
+ return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+}
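
The new builders compose a draw initiator by OR-ing the packed fields; a hypothetical example, assuming the pc_di_* enum values from adreno_pm4.xml.h:

static uint32_t example_draw_initiator(void)
{
	/* auto-indexed triangle list of 3 vertices (values illustrative) */
	return A2XX_VGT_DRAW_INITIATOR_PRIM_TYPE(DI_PT_TRILIST) |
	       A2XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(DI_SRC_SEL_AUTO_INDEX) |
	       A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(3);
}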
#define REG_A2XX_VGT_IMMED_DATA 0x000021fd
@@ -963,7 +1005,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend
}
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
-static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
}
@@ -981,7 +1023,7 @@ static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend
}
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
-static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum a2xx_rb_blend_opcode val)
{
return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a7be56163d23..303e8a9e91a5 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -41,31 +41,11 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-enum a3xx_render_mode {
- RB_RENDERING_PASS = 0,
- RB_TILING_PASS = 1,
- RB_RESOLVE_PASS = 2,
-};
-
enum a3xx_tile_mode {
LINEAR = 0,
TILE_32X32 = 2,
};
-enum a3xx_threadmode {
- MULTI = 0,
- SINGLE = 1,
-};
-
-enum a3xx_instrbuffermode {
- BUFFER = 1,
-};
-
-enum a3xx_threadsize {
- TWO_QUADS = 0,
- FOUR_QUADS = 1,
-};
-
enum a3xx_state_block_id {
HLSQ_BLOCK_ID_TP_TEX = 2,
HLSQ_BLOCK_ID_TP_MIPMAP = 3,
@@ -169,6 +149,8 @@ enum a3xx_color_fmt {
RB_R8G8B8A8_UNORM = 8,
RB_Z16_UNORM = 12,
RB_A8_UNORM = 20,
+ RB_R16G16B16A16_FLOAT = 27,
+ RB_R32G32B32A32_FLOAT = 51,
};
enum a3xx_color_swap {
@@ -178,12 +160,6 @@ enum a3xx_color_swap {
XYZW = 3,
};
-enum a3xx_msaa_samples {
- MSAA_ONE = 0,
- MSAA_TWO = 1,
- MSAA_FOUR = 2,
-};
-
enum a3xx_sp_perfcounter_select {
SP_FS_CFLOW_INSTRUCTIONS = 12,
SP_FS_FULL_ALU_INSTRUCTIONS = 14,
@@ -191,21 +167,45 @@ enum a3xx_sp_perfcounter_select {
SP_ALU_ACTIVE_CYCLES = 29,
};
-enum adreno_rb_copy_control_mode {
- RB_COPY_RESOLVE = 1,
- RB_COPY_DEPTH_STENCIL = 5,
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_XOR = 6,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
};
enum a3xx_tex_filter {
A3XX_TEX_NEAREST = 0,
A3XX_TEX_LINEAR = 1,
+ A3XX_TEX_ANISO = 2,
};
enum a3xx_tex_clamp {
A3XX_TEX_REPEAT = 0,
A3XX_TEX_CLAMP_TO_EDGE = 1,
A3XX_TEX_MIRROR_REPEAT = 2,
- A3XX_TEX_CLAMP_NONE = 3,
+ A3XX_TEX_CLAMP_TO_BORDER = 3,
+ A3XX_TEX_MIRROR_CLAMP = 4,
};
enum a3xx_tex_swiz {
@@ -316,6 +316,7 @@ enum a3xx_tex_type {
#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+#define A3XX_RBBM_PERFCTR_CTL_ENABLE 0x00000001
#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
@@ -549,6 +550,10 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+#define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
+
+#define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
+
#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
@@ -556,6 +561,9 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCOORD 0x00800000
+#define A3XX_GRAS_CL_CLIP_CNTL_WCOORD 0x01000000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZCLIP_DISABLE 0x02000000
#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
@@ -620,8 +628,26 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
}
#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+#define A3XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+}
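
The * 8.0 in these encoders means point sizes are fixed point with three fractional bits, i.e. eighths of a pixel; for instance:

static uint32_t example_point_size(void)
{
	return A3XX_GRAS_SU_POINT_SIZE(1.5f); /* 1.5 px -> 12 (0b1100) */
}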
#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
@@ -743,6 +769,7 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode va
#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_FACENESS 0x00000008
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
@@ -751,6 +778,10 @@ static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
}
#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_XCOORD 0x00004000
+#define A3XX_RB_RENDER_CONTROL_YCOORD 0x00008000
+#define A3XX_RB_RENDER_CONTROL_ZCOORD 0x00010000
+#define A3XX_RB_RENDER_CONTROL_WCOORD 0x00020000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST 0x00400000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
@@ -796,7 +827,7 @@ static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4
#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
-static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
{
return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
}
@@ -856,7 +887,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -874,7 +905,7 @@ static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -957,17 +988,24 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples
{
return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_DEPTHCLEAR 0x00000008
#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
-#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
+#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
{
- return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+ return ((val >> 14) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
}
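
The corrected shift means the GMEM base is now encoded at 16 KiB granularity (bits below 14 are dropped); a sketch with a hypothetical, properly aligned base:

static uint32_t example_copy_control_base(void)
{
	/* 0x4000 is 16 KiB aligned, so it survives the >> 14 / << 14
	 * round-trip unchanged; an unaligned base would be truncated. */
	return A3XX_RB_COPY_CONTROL_GMEM_BASE(0x4000);
}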
#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
@@ -1005,6 +1043,12 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
{
return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
}
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A3XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
@@ -1019,6 +1063,7 @@ static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endi
}
#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00000008
@@ -1044,7 +1089,7 @@ static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ return ((val >> 12) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
@@ -1172,6 +1217,8 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
}
#define REG_A3XX_RB_SAMPLE_COUNT_CONTROL 0x00002110
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_RESET 0x00000001
+#define A3XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
#define REG_A3XX_RB_SAMPLE_COUNT_ADDR 0x00002111
@@ -1179,7 +1226,23 @@ static inline uint32_t A3XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A3XX_RB_Z_CLAMP_MAX 0x00002115
+#define REG_A3XX_VGT_BIN_BASE 0x000021e1
+
+#define REG_A3XX_VGT_BIN_SIZE 0x000021e2
+
#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A3XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A3XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A3XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A3XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A3XX_PC_VSTREAM_CONTROL_N__SHIFT) & A3XX_PC_VSTREAM_CONTROL_N__MASK;
+}
#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
@@ -1203,6 +1266,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
}
#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
@@ -1232,6 +1296,7 @@ static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
@@ -1242,6 +1307,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
}
#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+#define A3XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A3XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
@@ -1312,10 +1383,36 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
}
#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK 0x00000003
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_WORKDIM__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK 0x00000ffc
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE0__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK 0x003ff000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE1__MASK;
+}
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK 0xffc00000
+#define A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT 22
+static inline uint32_t A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__SHIFT) & A3XX_HLSQ_CL_NDRANGE_0_REG_LOCALSIZE2__MASK;
+}
+
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK(uint32_t i0) { return 0x0000220b + 0x2*i0; }
-#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_SIZE(uint32_t i0) { return 0x0000220b + 0x2*i0; }
-#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
+static inline uint32_t REG_A3XX_HLSQ_CL_GLOBAL_WORK_OFFSET(uint32_t i0) { return 0x0000220c + 0x2*i0; }
#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
@@ -1323,7 +1420,9 @@ static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
-#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP(uint32_t i0) { return 0x00002215 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_HLSQ_CL_KERNEL_GROUP_RATIO(uint32_t i0) { return 0x00002215 + 0x1*i0; }
#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Y_REG 0x00002216
@@ -1438,6 +1537,12 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
{
return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
}
+#define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
+#define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A3XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
@@ -1462,12 +1567,13 @@ static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val
}
#define REG_A3XX_VPC_ATTR 0x00002280
-#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
{
return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
}
+#define A3XX_VPC_ATTR_PSIZE 0x00000200
#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
@@ -1522,11 +1628,11 @@ static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
{
return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
}
-#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
-#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
-static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
+#define A3XX_SP_SP_CTRL_REG_L0MODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_L0MODE(uint32_t val)
{
- return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
+ return ((val) << A3XX_SP_SP_CTRL_REG_L0MODE__SHIFT) & A3XX_SP_SP_CTRL_REG_L0MODE__MASK;
}
#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
@@ -1569,6 +1675,7 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1742,6 +1849,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
}
#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
@@ -1802,6 +1910,13 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
+#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
+static inline uint32_t A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
+}
static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
@@ -1914,6 +2029,42 @@ static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+#define REG_A3XX_VBIF_PERF_CNT_EN 0x00003070
+#define A3XX_VBIF_PERF_CNT_EN_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_EN_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_EN_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_CLR 0x00003071
+#define A3XX_VBIF_PERF_CNT_CLR_CNT0 0x00000001
+#define A3XX_VBIF_PERF_CNT_CLR_CNT1 0x00000002
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT0 0x00000004
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT1 0x00000008
+#define A3XX_VBIF_PERF_CNT_CLR_PWRCNT2 0x00000010
+
+#define REG_A3XX_VBIF_PERF_CNT_SEL 0x00003072
+
+#define REG_A3XX_VBIF_PERF_CNT0_LO 0x00003073
+
+#define REG_A3XX_VBIF_PERF_CNT0_HI 0x00003074
+
+#define REG_A3XX_VBIF_PERF_CNT1_LO 0x00003075
+
+#define REG_A3XX_VBIF_PERF_CNT1_HI 0x00003076
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_LO 0x00003077
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT0_HI 0x00003078
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_LO 0x00003079
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT1_HI 0x0000307a
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_LO 0x0000307b
+
+#define REG_A3XX_VBIF_PERF_PWR_CNT2_HI 0x0000307c
+
#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -2080,6 +2231,8 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
}
#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
+#define REG_A3XX_UNKNOWN_0EA6 0x00000ea6
+
#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
@@ -2117,6 +2270,39 @@ static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_op
#define REG_A3XX_VGT_EVENT_INITIATOR 0x000021f9
#define REG_A3XX_VGT_DRAW_INITIATOR 0x000021fc
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK 0x0000003f
+#define A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT 0
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_PRIM_TYPE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK 0x000000c0
+#define A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__SHIFT) & A3XX_VGT_DRAW_INITIATOR_SOURCE_SELECT__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK 0x00000600
+#define A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT 9
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_VIS_CULL__SHIFT) & A3XX_VGT_DRAW_INITIATOR_VIS_CULL__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK 0x00000800
+#define A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT 11
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__SHIFT) & A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE__MASK;
+}
+#define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
+#define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
+#define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+{
+ return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+}
#define REG_A3XX_VGT_IMMED_DATA 0x000021fd
@@ -2152,6 +2338,12 @@ static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
{
return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
}
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK 0x00700000
+#define A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT 20
+static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_COMPARE_FUNC__SHIFT) & A3XX_TEX_SAMP_0_COMPARE_FUNC__MASK;
+}
#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
#define REG_A3XX_TEX_SAMP_1 0x00000001
@@ -2170,6 +2362,7 @@ static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
#define REG_A3XX_TEX_CONST_0 0x00000000
#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_SRGB 0x00000004
#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
@@ -2206,6 +2399,7 @@ static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
{
return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
}
+#define A3XX_TEX_CONST_0_NOCONVERT 0x20000000
#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 942e09d898a8..2773600c9488 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -392,13 +392,10 @@ static const unsigned int a3xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- struct drm_device *dev = gpu->dev;
int i;
adreno_show(gpu, m);
- mutex_lock(&dev->struct_mutex);
-
gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
@@ -418,8 +415,6 @@ static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
}
gpu->funcs->pm_suspend(gpu);
-
- mutex_unlock(&dev->struct_mutex);
}
#endif
@@ -685,6 +680,8 @@ static int a3xx_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,adreno-3xx" },
+ /* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index bb9a8ca0507b..85ff66cbddd6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -19,6 +19,11 @@
#define __A3XX_GPU_H__
#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
#include "a3xx.xml.h"
struct a3xx_gpu {
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index d6e6ce2d1abd..9de19ac2e86c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -87,15 +87,6 @@ enum adreno_rb_blend_factor {
FACTOR_SRC_ALPHA_SATURATE = 16,
};
-enum adreno_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_MIN_DST_SRC = 2,
- BLEND_MAX_DST_SRC = 3,
- BLEND_DST_MINUS_SRC = 4,
- BLEND_DST_PLUS_SRC_BIAS = 5,
-};
-
enum adreno_rb_surface_endian {
ENDIAN_NONE = 0,
ENDIAN_8IN16 = 1,
@@ -116,6 +107,39 @@ enum adreno_rb_depth_format {
DEPTHX_24_8 = 1,
};
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_CLEAR = 2,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+ RB_COMPUTE_PASS = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
@@ -264,6 +288,8 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
#define REG_AXXX_CP_INT_ACK 0x000001f4
#define REG_AXXX_CP_ME_CNTL 0x000001f6
+#define AXXX_CP_ME_CNTL_BUSY 0x20000000
+#define AXXX_CP_ME_CNTL_HALT 0x10000000
#define REG_AXXX_CP_ME_STATUS 0x000001f7
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 28ca8cd8b09e..655ce5b14ad0 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -91,9 +91,17 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
DBG("%s", gpu->name);
+ ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ if (ret) {
+ gpu->rb_iova = 0;
+ dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+ return ret;
+ }
+
/* Setup REG_CP_RB_CNTL: */
gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
/* size is log2(quad-words): */
@@ -362,8 +370,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
+ mutex_lock(&drm->struct_mutex);
gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
MSM_BO_UNCACHED);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->memptrs_bo)) {
ret = PTR_ERR(gpu->memptrs_bo);
gpu->memptrs_bo = NULL;
@@ -371,13 +381,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+ gpu->memptrs = msm_gem_vaddr(gpu->memptrs_bo);
if (!gpu->memptrs) {
dev_err(drm->dev, "could not vmap memptrs\n");
return -ENOMEM;
}
- ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+ ret = msm_gem_get_iova(gpu->memptrs_bo, gpu->base.id,
&gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index ae992c71703f..4eee0ec8f069 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32814 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 8900 bytes, from 2013-10-22 23:57:49)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 10574 bytes, from 2013-11-13 05:44:45)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 53644 bytes, from 2013-11-30 15:07:33)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 8344 bytes, from 2013-11-30 14:49:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14477 bytes, from 2014-05-16 11:51:57)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-06-25 12:57:16)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 26602 bytes, from 2014-06-25 12:57:16)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -105,6 +105,7 @@ enum pc_di_index_size {
enum pc_di_vis_cull_mode {
IGNORE_VISIBILITY = 0,
+ USE_VISIBILITY = 1,
};
enum adreno_pm4_packet_type {
@@ -163,6 +164,11 @@ enum adreno_pm4_type3_packets {
CP_SET_BIN = 76,
CP_TEST_TWO_MEMS = 113,
CP_WAIT_FOR_ME = 19,
+ CP_SET_DRAW_STATE = 67,
+ CP_DRAW_INDX_OFFSET = 56,
+ CP_DRAW_INDIRECT = 40,
+ CP_DRAW_INDX_INDIRECT = 41,
+ CP_DRAW_AUTO = 36,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -232,6 +238,211 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
}
+#define REG_CP_DRAW_INDX_0 0x00000000
+#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_1 0x00000001
+#define CP_DRAW_INDX_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2 0x00000002
+#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_0 0x00000000
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__MASK 0xffffffff
+#define CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_0_VIZ_QUERY(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_0_VIZ_QUERY__SHIFT) & CP_DRAW_INDX_2_0_VIZ_QUERY__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_1 0x00000001
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_1_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_2_1_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_2_1_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_2_1_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_2_1_VIS_CULL__MASK 0x00000600
+#define CP_DRAW_INDX_2_1_VIS_CULL__SHIFT 9
+static inline uint32_t CP_DRAW_INDX_2_1_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_VIS_CULL__SHIFT) & CP_DRAW_INDX_2_1_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_2_1_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_2_2 0x00000002
+#define CP_DRAW_INDX_2_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_2_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_2_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_0 0x00000000
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK 0x0000003f
+#define CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__SHIFT) & CP_DRAW_INDX_OFFSET_0_PRIM_TYPE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK 0x000000c0
+#define CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000700
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000800
+#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 11
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
+}
+#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
+#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
+#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000
+#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK;
+}
+
+#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff
+#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0
+static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_0 0x00000000
+#define CP_SET_DRAW_STATE_0_COUNT__MASK 0x0000ffff
+#define CP_SET_DRAW_STATE_0_COUNT__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_0_COUNT(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_0_COUNT__SHIFT) & CP_SET_DRAW_STATE_0_COUNT__MASK;
+}
+#define CP_SET_DRAW_STATE_0_DIRTY 0x00010000
+#define CP_SET_DRAW_STATE_0_DISABLE 0x00020000
+#define CP_SET_DRAW_STATE_0_DISABLE_ALL_GROUPS 0x00040000
+#define CP_SET_DRAW_STATE_0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__MASK 0x1f000000
+#define CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT 24
+static inline uint32_t CP_SET_DRAW_STATE_0_GROUP_ID(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_0_GROUP_ID__SHIFT) & CP_SET_DRAW_STATE_0_GROUP_ID__MASK;
+}
+
+#define REG_CP_SET_DRAW_STATE_1 0x00000001
+#define CP_SET_DRAW_STATE_1_ADDR__MASK 0xffffffff
+#define CP_SET_DRAW_STATE_1_ADDR__SHIFT 0
+static inline uint32_t CP_SET_DRAW_STATE_1_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE_1_ADDR__SHIFT) & CP_SET_DRAW_STATE_1_ADDR__MASK;
+}
+
#define REG_CP_SET_BIN_0 0x00000000
#define REG_CP_SET_BIN_1 0x00000001
@@ -262,5 +473,21 @@ static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
}
+#define REG_CP_SET_BIN_DATA_0 0x00000000
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK 0xffffffff
+#define CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_0_BIN_DATA_ADDR(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_0_BIN_DATA_ADDR__SHIFT) & CP_SET_BIN_DATA_0_BIN_DATA_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA_1 0x00000001
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK 0xffffffff
+#define CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
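Aside: every field helper in these generated headers follows the same MASK/SHIFT pattern, shifting the value into position and masking off anything that would spill into a neighbouring field. A minimal sketch of how the CP_DRAW_INDX_OFFSET_0 helpers above combine into the packet's first dword; the enum values are assumed from elsewhere in this generated header:

static inline uint32_t example_draw_indx_offset_0(uint32_t num_indices)
{
	/* pack each field into its bit range, then OR the fields together */
	return CP_DRAW_INDX_OFFSET_0_PRIM_TYPE(DI_PT_TRILIST) |
	       CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(DI_SRC_SEL_DMA) |
	       CP_DRAW_INDX_OFFSET_0_VIS_CULL(IGNORE_VISIBILITY) |
	       CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(INDEX_SIZE_16_BIT) |
	       CP_DRAW_INDX_OFFSET_0_NUM_INDICES(num_indices);
}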
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 87be647e3825..0f1f5b9459a5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 747a6ef4211f..d468f86f637c 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 48e03acf19bf..da8740054cdf 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 7f7aadef8a82..a125a7e32742 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -123,7 +123,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
for (i = 0; i < config->hpd_reg_cnt; i++) {
struct regulator *reg;
- reg = devm_regulator_get(&pdev->dev, config->hpd_reg_names[i]);
+ reg = devm_regulator_get_exclusive(&pdev->dev,
+ config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n",
@@ -138,7 +139,8 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
for (i = 0; i < config->pwr_reg_cnt; i++) {
struct regulator *reg;
- reg = devm_regulator_get(&pdev->dev, config->pwr_reg_names[i]);
+ reg = devm_regulator_get_exclusive(&pdev->dev,
+ config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n",
@@ -266,37 +268,56 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
{
int gpio = of_get_named_gpio(of_node, name, 0);
if (gpio < 0) {
- dev_err(dev, "failed to get gpio: %s (%d)\n",
- name, gpio);
- gpio = -1;
+ char name2[32];
+ snprintf(name2, sizeof(name2), "%s-gpio", name);
+ gpio = of_get_named_gpio(of_node, name2, 0);
+ if (gpio < 0) {
+ dev_err(dev, "failed to get gpio: %s (%d)\n",
+ name, gpio);
+ gpio = -1;
+ }
}
return gpio;
}
- /* TODO actually use DT.. */
- static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
- static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
- static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
- static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
- static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+ if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) {
+ static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
+ static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
+ static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+ static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
+ static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
+ config.phy_init = hdmi_phy_8x74_init;
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.pwr_reg_names = pwr_reg_names;
+ config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_freq = hpd_clk_freq;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ config.pwr_clk_names = pwr_clk_names;
+ config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
+ config.shared_irq = true;
+ } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
+ static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
+ static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
+ config.phy_init = hdmi_phy_8960_init;
+ config.hpd_reg_names = hpd_reg_names;
+ config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
+ config.hpd_clk_names = hpd_clk_names;
+ config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
+ } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8660")) {
+ config.phy_init = hdmi_phy_8x60_init;
+ } else {
+ dev_err(dev, "unknown phy: %s\n", of_node->name);
+ }
- config.phy_init = hdmi_phy_8x74_init;
config.mmio_name = "core_physical";
- config.hpd_reg_names = hpd_reg_names;
- config.hpd_reg_cnt = ARRAY_SIZE(hpd_reg_names);
- config.pwr_reg_names = pwr_reg_names;
- config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
- config.hpd_clk_names = hpd_clk_names;
- config.hpd_freq = hpd_clk_freq;
- config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
- config.pwr_clk_names = pwr_clk_names;
- config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk");
config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data");
config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd");
config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en");
config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel");
- config.shared_irq = true;
+ config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm");
#else
static const char *hpd_clk_names[] = {
@@ -373,7 +394,9 @@ static int hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,hdmi-tx" },
+ { .compatible = "qcom,hdmi-tx-8074" },
+ { .compatible = "qcom,hdmi-tx-8960" },
+ { .compatible = "qcom,hdmi-tx-8660" },
{}
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9d7723c6528a..b981995410b5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -96,6 +96,7 @@ struct hdmi_platform_config {
/* gpio's: */
int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+ int mux_lpm_gpio;
/* older devices had their own irq, mdp5+ it is shared w/ mdp: */
bool shared_irq;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index e2636582cfd7..e89fe053d375 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -148,9 +148,9 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
-static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
-static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_0(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
#define HDMI_ACR_0_CTS__MASK 0xfffff000
#define HDMI_ACR_0_CTS__SHIFT 12
static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
@@ -158,7 +158,7 @@ static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
}
-static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
+static inline uint32_t REG_HDMI_ACR_1(enum hdmi_acr_cts i0) { return 0x000000c8 + 0x8*i0; }
#define HDMI_ACR_1_N__MASK 0xffffffff
#define HDMI_ACR_1_N__SHIFT 0
static inline uint32_t HDMI_ACR_1_N(uint32_t val)
@@ -552,6 +552,103 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
#define REG_HDMI_8960_PHY_REG11 0x0000042c
#define REG_HDMI_8960_PHY_REG12 0x00000430
+#define HDMI_8960_PHY_REG12_SW_RESET 0x00000020
+#define HDMI_8960_PHY_REG12_PWRDN_B 0x00000080
+
+#define REG_HDMI_8960_PHY_REG_BIST_CFG 0x00000434
+
+#define REG_HDMI_8960_PHY_DEBUG_BUS_SEL 0x00000438
+
+#define REG_HDMI_8960_PHY_REG_MISC0 0x0000043c
+
+#define REG_HDMI_8960_PHY_REG13 0x00000440
+
+#define REG_HDMI_8960_PHY_REG14 0x00000444
+
+#define REG_HDMI_8960_PHY_REG15 0x00000448
+
+#define REG_HDMI_8960_PHY_PLL_REFCLK_CFG 0x00000500
+
+#define REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG 0x00000504
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 0x00000508
+
+#define REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 0x0000050c
+
+#define REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG 0x00000510
+
+#define REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG 0x00000514
+
+#define REG_HDMI_8960_PHY_PLL_PWRDN_B 0x00000518
+#define HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL 0x00000002
+#define HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B 0x00000008
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG0 0x0000051c
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG1 0x00000520
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG2 0x00000524
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG3 0x00000528
+
+#define REG_HDMI_8960_PHY_PLL_SDM_CFG4 0x0000052c
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG0 0x00000530
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG1 0x00000534
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG2 0x00000538
+
+#define REG_HDMI_8960_PHY_PLL_SSC_CFG3 0x0000053c
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 0x00000540
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 0x00000544
+
+#define REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 0x00000548
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 0x0000054c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 0x00000550
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 0x00000554
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 0x00000558
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 0x0000055c
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 0x00000560
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 0x00000564
+
+#define REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 0x00000568
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_SEL 0x0000056c
+
+#define REG_HDMI_8960_PHY_PLL_MISC0 0x00000570
+
+#define REG_HDMI_8960_PHY_PLL_MISC1 0x00000574
+
+#define REG_HDMI_8960_PHY_PLL_MISC2 0x00000578
+
+#define REG_HDMI_8960_PHY_PLL_MISC3 0x0000057c
+
+#define REG_HDMI_8960_PHY_PLL_MISC4 0x00000580
+
+#define REG_HDMI_8960_PHY_PLL_MISC5 0x00000584
+
+#define REG_HDMI_8960_PHY_PLL_MISC6 0x00000588
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS0 0x0000058c
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS1 0x00000590
+
+#define REG_HDMI_8960_PHY_PLL_DEBUG_BUS2 0x00000594
+
+#define REG_HDMI_8960_PHY_PLL_STATUS0 0x00000598
+#define HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK 0x00000001
+
+#define REG_HDMI_8960_PHY_PLL_STATUS1 0x0000059c
#define REG_HDMI_8x74_ANA_CFG0 0x00000000
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 28f7e3ec6c28..4aca2a3c667c 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -63,7 +63,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
if (ret) {
dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
- "HDMI_MUX_SEL", config->mux_en_gpio, ret);
+ "HDMI_MUX_EN", config->mux_en_gpio, ret);
goto error4;
}
gpio_set_value_cansleep(config->mux_en_gpio, 1);
@@ -78,6 +78,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
}
gpio_set_value_cansleep(config->mux_sel_gpio, 0);
}
+
+ if (config->mux_lpm_gpio != -1) {
+ ret = gpio_request(config->mux_lpm_gpio,
+ "HDMI_MUX_LPM");
+ if (ret) {
+ dev_err(dev->dev,
+ "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_LPM",
+ config->mux_lpm_gpio, ret);
+ goto error6;
+ }
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+ }
DBG("gpio on");
} else {
gpio_free(config->ddc_clk_gpio);
@@ -93,11 +106,19 @@ static int gpio_config(struct hdmi *hdmi, bool on)
gpio_set_value_cansleep(config->mux_sel_gpio, 1);
gpio_free(config->mux_sel_gpio);
}
+
+ if (config->mux_lpm_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+ gpio_free(config->mux_lpm_gpio);
+ }
DBG("gpio off");
}
return 0;
+error6:
+ if (config->mux_sel_gpio != -1)
+ gpio_free(config->mux_sel_gpio);
error5:
if (config->mux_en_gpio != -1)
gpio_free(config->mux_en_gpio);
@@ -306,7 +327,7 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
hdp_disable(hdmi_connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
hdmi_unreference(hdmi_connector->hdmi);
@@ -416,7 +437,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
ret = hpd_enable(hdmi_connector);
if (ret) {
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index e5b7ed5b8f01..902d7685d441 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -15,13 +15,370 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
#include "hdmi.h"
struct hdmi_phy_8960 {
struct hdmi_phy base;
struct hdmi *hdmi;
+ struct clk_hw pll_hw;
+ struct clk *pll;
+ unsigned long pixclk;
};
#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+#define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw)
+
+/*
+ * HDMI PLL:
+ *
+ * To get the parent clock set up properly, we need to plug the HDMI PLL
+ * configuration into the common clock framework.
+ */
+
+struct pll_rate {
+ unsigned long rate;
+ struct {
+ uint32_t val;
+ uint32_t reg;
+ } conf[32];
+};
+
+/* NOTE: keep sorted highest freq to lowest: */
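+/* Each per-rate conf[] list is terminated by a { 0, 0 } sentinel entry. */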
+static const struct pll_rate freqtbl[] = {
+ /* 1080p60/1080p50 case */
+ { 148500000, {
+ { 0x02, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ { 108000000, {
+ { 0x08, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x21, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x1c, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x49, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
+ /* 720p60/720p50/1080i60/1080i50/1080p24/1080p30/1080p25 */
+ { 74250000, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x12, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x76, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0xe6, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0, 0 } }
+ },
+ { 65000000, {
+ { 0x18, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xf9, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x8a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x0b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4b, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x09, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0, 0 } }
+ },
+ /* 480p60/480i60 */
+ { 27030000, {
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x38, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x20, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0xff, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4e, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0xd7, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ /* 576p50/576i50 */
+ { 27000000, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x7b, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x01, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0x2a, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x03, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+ /* 640x480p60 */
+ { 25200000, {
+ { 0x32, REG_HDMI_8960_PHY_PLL_REFCLK_CFG },
+ { 0x02, REG_HDMI_8960_PHY_PLL_CHRG_PUMP_CFG },
+ { 0x01, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG0 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_LOOP_FLT_CFG1 },
+ { 0x2c, REG_HDMI_8960_PHY_PLL_IDAC_ADJ_CFG },
+ { 0x06, REG_HDMI_8960_PHY_PLL_I_VI_KVCO_CFG },
+ { 0x0a, REG_HDMI_8960_PHY_PLL_PWRDN_B },
+ { 0x77, REG_HDMI_8960_PHY_PLL_SDM_CFG0 },
+ { 0x4c, REG_HDMI_8960_PHY_PLL_SDM_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG2 },
+ { 0xc0, REG_HDMI_8960_PHY_PLL_SDM_CFG3 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SDM_CFG4 },
+ { 0x9a, REG_HDMI_8960_PHY_PLL_SSC_CFG0 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG1 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_SSC_CFG2 },
+ { 0x20, REG_HDMI_8960_PHY_PLL_SSC_CFG3 },
+ { 0x10, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0 },
+ { 0x1a, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1 },
+ { 0x0d, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2 },
+ { 0xf4, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG0 },
+ { 0x02, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG1 },
+ { 0x3b, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG2 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG3 },
+ { 0x86, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG4 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG5 },
+ { 0x33, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG6 },
+ { 0x00, REG_HDMI_8960_PHY_PLL_VCOCAL_CFG7 },
+ { 0, 0 } }
+ },
+};
+
+static int hdmi_pll_enable(struct clk_hw *hw)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ int timeout_count, pll_lock_retry = 10;
+ unsigned int val;
+
+ DBG("");
+
+ /* Assert PLL S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG0, 0x10);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG1, 0x1a);
+
+ /* Wait briefly before de-asserting so the hardware can
+ * finish its job; this delay is long enough for the
+ * assert/de-assert cycle to complete.
+ */
+ udelay(10);
+
+ /* De-assert PLL S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_SW_RESET;
+ /* Assert PHY S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ val &= ~HDMI_8960_PHY_REG12_SW_RESET;
+ /* Wait briefly before de-asserting so the hardware can
+ * finish its job; this delay is long enough for the
+ * assert/de-assert cycle to complete.
+ */
+ udelay(10);
+ /* De-assert PHY S/W reset */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x3f);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val |= HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+ /* Wait 10 us for the PHY's global power to come up */
+ mb();
+ udelay(10);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+ val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x80);
+
+ timeout_count = 1000;
+ while (--pll_lock_retry > 0) {
+
+ /* are we there yet? */
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_STATUS0);
+ if (val & HDMI_8960_PHY_PLL_STATUS0_PLL_LOCK)
+ break;
+
+ udelay(1);
+
+ if (--timeout_count > 0)
+ continue;
+
+ /*
+ * The PLL has still not locked; do a software reset and
+ * try again, asserting the PLL S/W reset first.
+ */
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x8d);
+ udelay(10);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_LOCKDET_CFG2, 0x0d);
+
+ /*
+ * Wait a short while for the PLL calibration to finish
+ * before checking whether the PLL has locked.
+ */
+ udelay(350);
+
+ timeout_count = 1000;
+ }
+
+ return 0;
+}
+
+static void hdmi_pll_disable(struct clk_hw *hw)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ unsigned int val;
+
+ DBG("");
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_REG12);
+ val &= ~HDMI_8960_PHY_REG12_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG12, val);
+
+ val = hdmi_read(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B);
+ val |= HDMI_8960_PHY_PLL_PWRDN_B_PD_PLL;
+ val &= ~HDMI_8960_PHY_PLL_PWRDN_B_PLL_PWRDN_B;
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_PLL_PWRDN_B, val);
+ /* Make sure HDMI PHY/PLL are powered down */
+ mb();
+}
+
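+/* Return the smallest table rate that is >= the requested rate (clamped to
+ * the table's limits); relies on freqtbl[] being sorted highest to lowest.
+ */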
+static const struct pll_rate *find_rate(unsigned long rate)
+{
+ int i;
+ for (i = 1; i < ARRAY_SIZE(freqtbl); i++)
+ if (rate > freqtbl[i].rate)
+ return &freqtbl[i-1];
+ return &freqtbl[i-1];
+}
+
+static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ return phy_8960->pixclk;
+}
+
+static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pll_rate *pll_rate = find_rate(rate);
+ return pll_rate->rate;
+}
+
+static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_phy_8960 *phy_8960 = clk_to_phy(hw);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ const struct pll_rate *pll_rate = find_rate(rate);
+ int i;
+
+ DBG("rate=%lu", rate);
+
+ for (i = 0; pll_rate->conf[i].reg; i++)
+ hdmi_write(hdmi, pll_rate->conf[i].reg, pll_rate->conf[i].val);
+
+ phy_8960->pixclk = rate;
+
+ return 0;
+}
+
+static const struct clk_ops hdmi_pll_ops = {
+ .enable = hdmi_pll_enable,
+ .disable = hdmi_pll_disable,
+ .recalc_rate = hdmi_pll_recalc_rate,
+ .round_rate = hdmi_pll_round_rate,
+ .set_rate = hdmi_pll_set_rate,
+};
+
+static const char *hdmi_pll_parents[] = {
+ "pxo",
+};
+
+static struct clk_init_data pll_init = {
+ .name = "hdmi_pll",
+ .ops = &hdmi_pll_ops,
+ .parent_names = hdmi_pll_parents,
+ .num_parents = ARRAY_SIZE(hdmi_pll_parents),
+};
+
+/*
+ * HDMI Phy:
+ */
static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
{
@@ -86,6 +443,9 @@ static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
struct hdmi *hdmi = phy_8960->hdmi;
+ DBG("pixclock: %lu", pixclock);
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x00);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
@@ -104,6 +464,8 @@ static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
struct hdmi *hdmi = phy_8960->hdmi;
+ DBG("");
+
hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
}
@@ -118,7 +480,12 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
{
struct hdmi_phy_8960 *phy_8960;
struct hdmi_phy *phy = NULL;
- int ret;
+ int ret, i;
+
+ /* sanity check: */
+ for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
+ if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate))
+ return ERR_PTR(-EINVAL);
phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
if (!phy_8960) {
@@ -132,6 +499,14 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
phy_8960->hdmi = hdmi;
+ phy_8960->pll_hw.init = &pll_init;
+ phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw);
+ if (IS_ERR(phy_8960->pll)) {
+ ret = PTR_ERR(phy_8960->pll);
+ phy_8960->pll = NULL;
+ goto fail;
+ }
+
return phy;
fail:
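Aside: a minimal consumer-side sketch of driving the PLL registered above through the common clock framework. The rate request snaps to a freqtbl[] entry via hdmi_pll_round_rate(), and enabling the clock programs the PHY and waits for lock. The clk_get() lookup by name is hypothetical; in the driver the clock is wired up through the encoder code:

static int example_enable_hdmi_pll(struct device *dev)
{
	struct clk *pll;
	int ret;

	pll = clk_get(dev, "hdmi_pll");		/* hypothetical lookup by name */
	if (IS_ERR(pll))
		return PTR_ERR(pll);

	ret = clk_set_rate(pll, 74250000);	/* snapped via hdmi_pll_round_rate() */
	if (ret)
		return ret;

	return clk_prepare_enable(pll);		/* hdmi_pll_enable() waits for lock */
}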
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d591567173c4..bd81db6a7829 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 416a26e1e58d..122208e8a2ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 0bb4faa17523..733646c0d3f8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -147,7 +147,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
if (mdp4_kms->blank_cursor_bo)
- drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
kfree(mdp4_kms);
}
@@ -176,6 +176,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
clk_disable_unprepare(mdp4_kms->lut_clk);
+ if (mdp4_kms->axi_clk)
+ clk_disable_unprepare(mdp4_kms->axi_clk);
return 0;
}
@@ -188,6 +190,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
clk_prepare_enable(mdp4_kms->lut_clk);
+ if (mdp4_kms->axi_clk)
+ clk_prepare_enable(mdp4_kms->axi_clk);
return 0;
}
@@ -294,15 +298,17 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
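+ /* these supplies are optional: treat a failed lookup as "not present" */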
+ mdp4_kms->dsi_pll_vdda =
+ devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
if (IS_ERR(mdp4_kms->dsi_pll_vdda))
mdp4_kms->dsi_pll_vdda = NULL;
- mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+ mdp4_kms->dsi_pll_vddio =
+ devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
if (IS_ERR(mdp4_kms->dsi_pll_vddio))
mdp4_kms->dsi_pll_vddio = NULL;
- mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(mdp4_kms->vdd))
mdp4_kms->vdd = NULL;
@@ -333,6 +339,13 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
+ if (IS_ERR(mdp4_kms->axi_clk)) {
+ dev_err(dev->dev, "failed to get axi_clk\n");
+ ret = PTR_ERR(mdp4_kms->axi_clk);
+ goto fail;
+ }
+
clk_set_rate(mdp4_kms->clk, config->max_clk);
clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
@@ -348,7 +361,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(dev, config->iommu);
+ mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
@@ -406,6 +419,8 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
/* TODO */
+ config.max_clk = 266667000;
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
if (cpu_is_apq8064())
config.max_clk = 266667000;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 715520c54cde..3225da804c61 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -42,6 +42,7 @@ struct mdp4_kms {
struct clk *clk;
struct clk *pclk;
struct clk *lut_clk;
+ struct clk *axi_clk;
struct mdp_irq error_handler;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 0aa51517f826..67f4f896ba8c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -12,14 +12,14 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
-Copyright (C) 2013 by the following authors:
+Copyright (C) 2013-2014 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
Permission is hereby granted, free of charge, to any person obtaining
@@ -68,6 +68,8 @@ enum mdp5_pipe {
SSPP_RGB2 = 5,
SSPP_DMA0 = 6,
SSPP_DMA1 = 7,
+ SSPP_VIG3 = 8,
+ SSPP_RGB3 = 9,
};
enum mdp5_ctl_mode {
@@ -126,7 +128,11 @@ enum mdp5_client_id {
CID_RGB0 = 16,
CID_RGB1 = 17,
CID_RGB2 = 18,
- CID_MAX = 19,
+ CID_VIG3_Y = 19,
+ CID_VIG3_CR = 20,
+ CID_VIG3_CB = 21,
+ CID_RGB3 = 22,
+ CID_MAX = 23,
};
enum mdp5_igc_type {
@@ -299,11 +305,34 @@ static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
-static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000600 + 0x100*i0; }
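+/* Register bases are no longer a fixed stride; they are looked up in the
+ * per-SoC config (mdp5_cfg) so one generated header can serve several
+ * MDP5 generations.
+ */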
+static inline uint32_t __offset_CTL(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ctl.base[0]);
+ case 1: return (mdp5_cfg->ctl.base[1]);
+ case 2: return (mdp5_cfg->ctl.base[2]);
+ case 3: return (mdp5_cfg->ctl.base[3]);
+ case 4: return (mdp5_cfg->ctl.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL(uint32_t i0) { return 0x00000000 + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+static inline uint32_t __offset_LAYER(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000000;
+ case 1: return 0x00000004;
+ case 2: return 0x00000008;
+ case 3: return 0x0000000c;
+ case 4: return 0x00000010;
+ case 5: return 0x00000024;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_CTL_LAYER(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
-static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000600 + 0x100*i0 + 0x4*i1; }
+static inline uint32_t REG_MDP5_CTL_LAYER_REG(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_CTL(i0) + __offset_LAYER(i1); }
#define MDP5_CTL_LAYER_REG_VIG0__MASK 0x00000007
#define MDP5_CTL_LAYER_REG_VIG0__SHIFT 0
static inline uint32_t MDP5_CTL_LAYER_REG_VIG0(enum mdp_mixer_stage_id val)
@@ -354,8 +383,20 @@ static inline uint32_t MDP5_CTL_LAYER_REG_DMA1(enum mdp_mixer_stage_id val)
}
#define MDP5_CTL_LAYER_REG_BORDER_COLOR 0x01000000
#define MDP5_CTL_LAYER_REG_CURSOR_OUT 0x02000000
+#define MDP5_CTL_LAYER_REG_VIG3__MASK 0x1c000000
+#define MDP5_CTL_LAYER_REG_VIG3__SHIFT 26
+static inline uint32_t MDP5_CTL_LAYER_REG_VIG3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_VIG3__SHIFT) & MDP5_CTL_LAYER_REG_VIG3__MASK;
+}
+#define MDP5_CTL_LAYER_REG_RGB3__MASK 0xe0000000
+#define MDP5_CTL_LAYER_REG_RGB3__SHIFT 29
+static inline uint32_t MDP5_CTL_LAYER_REG_RGB3(enum mdp_mixer_stage_id val)
+{
+ return ((val) << MDP5_CTL_LAYER_REG_RGB3__SHIFT) & MDP5_CTL_LAYER_REG_RGB3__MASK;
+}
-static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000614 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_OP(uint32_t i0) { return 0x00000014 + __offset_CTL(i0); }
#define MDP5_CTL_OP_MODE__MASK 0x0000000f
#define MDP5_CTL_OP_MODE__SHIFT 0
static inline uint32_t MDP5_CTL_OP_MODE(enum mdp5_ctl_mode val)
@@ -377,7 +418,7 @@ static inline uint32_t MDP5_CTL_OP_PACK_3D(enum mdp5_pack_3d val)
return ((val) << MDP5_CTL_OP_PACK_3D__SHIFT) & MDP5_CTL_OP_PACK_3D__MASK;
}
-static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000018 + __offset_CTL(i0); }
#define MDP5_CTL_FLUSH_VIG0 0x00000001
#define MDP5_CTL_FLUSH_VIG1 0x00000002
#define MDP5_CTL_FLUSH_VIG2 0x00000004
@@ -387,26 +428,48 @@ static inline uint32_t REG_MDP5_CTL_FLUSH(uint32_t i0) { return 0x00000618 + 0x1
#define MDP5_CTL_FLUSH_LM0 0x00000040
#define MDP5_CTL_FLUSH_LM1 0x00000080
#define MDP5_CTL_FLUSH_LM2 0x00000100
+#define MDP5_CTL_FLUSH_LM3 0x00000200
+#define MDP5_CTL_FLUSH_LM4 0x00000400
#define MDP5_CTL_FLUSH_DMA0 0x00000800
#define MDP5_CTL_FLUSH_DMA1 0x00001000
#define MDP5_CTL_FLUSH_DSPP0 0x00002000
#define MDP5_CTL_FLUSH_DSPP1 0x00004000
#define MDP5_CTL_FLUSH_DSPP2 0x00008000
#define MDP5_CTL_FLUSH_CTL 0x00020000
+#define MDP5_CTL_FLUSH_VIG3 0x00040000
+#define MDP5_CTL_FLUSH_RGB3 0x00080000
+#define MDP5_CTL_FLUSH_LM5 0x00100000
+#define MDP5_CTL_FLUSH_DSPP3 0x00200000
-static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000061c + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_START(uint32_t i0) { return 0x0000001c + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000620 + 0x100*i0; }
+static inline uint32_t REG_MDP5_CTL_PACK_3D(uint32_t i0) { return 0x00000020 + __offset_CTL(i0); }
-static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+static inline uint32_t __offset_PIPE(enum mdp5_pipe idx)
+{
+ switch (idx) {
+ case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]);
+ case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]);
+ case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]);
+ case SSPP_RGB0: return (mdp5_cfg->pipe_rgb.base[0]);
+ case SSPP_RGB1: return (mdp5_cfg->pipe_rgb.base[1]);
+ case SSPP_RGB2: return (mdp5_cfg->pipe_rgb.base[2]);
+ case SSPP_DMA0: return (mdp5_cfg->pipe_dma.base[0]);
+ case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]);
+ case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]);
+ case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_PIPE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000014c4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_CTL_BASE(enum mdp5_pipe i0) { return 0x000002c4 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000014f0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_BASE(enum mdp5_pipe i0) { return 0x000002f0 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00001500 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_HIST_LUT_SWAP(enum mdp5_pipe i0) { return 0x00000300 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00001200 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_SIZE(enum mdp5_pipe i0) { return 0x00000000 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
@@ -420,7 +483,7 @@ static inline uint32_t MDP5_PIPE_SRC_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00001204 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_IMG_SIZE(enum mdp5_pipe i0) { return 0x00000004 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_SRC_IMG_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(uint32_t val)
@@ -434,7 +497,7 @@ static inline uint32_t MDP5_PIPE_SRC_IMG_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_SRC_IMG_SIZE_WIDTH__SHIFT) & MDP5_PIPE_SRC_IMG_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00001208 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_XY(enum mdp5_pipe i0) { return 0x00000008 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_SRC_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_SRC_XY_Y(uint32_t val)
@@ -448,7 +511,7 @@ static inline uint32_t MDP5_PIPE_SRC_XY_X(uint32_t val)
return ((val) << MDP5_PIPE_SRC_XY_X__SHIFT) & MDP5_PIPE_SRC_XY_X__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000120c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_OUT_SIZE(enum mdp5_pipe i0) { return 0x0000000c + __offset_PIPE(i0); }
#define MDP5_PIPE_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_PIPE_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_SIZE_HEIGHT(uint32_t val)
@@ -462,7 +525,7 @@ static inline uint32_t MDP5_PIPE_OUT_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_PIPE_OUT_SIZE_WIDTH__SHIFT) & MDP5_PIPE_OUT_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00001210 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_OUT_XY(enum mdp5_pipe i0) { return 0x00000010 + __offset_PIPE(i0); }
#define MDP5_PIPE_OUT_XY_Y__MASK 0xffff0000
#define MDP5_PIPE_OUT_XY_Y__SHIFT 16
static inline uint32_t MDP5_PIPE_OUT_XY_Y(uint32_t val)
@@ -476,15 +539,15 @@ static inline uint32_t MDP5_PIPE_OUT_XY_X(uint32_t val)
return ((val) << MDP5_PIPE_OUT_XY_X__SHIFT) & MDP5_PIPE_OUT_XY_X__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00001214 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC0_ADDR(enum mdp5_pipe i0) { return 0x00000014 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00001218 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC1_ADDR(enum mdp5_pipe i0) { return 0x00000018 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000121c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC2_ADDR(enum mdp5_pipe i0) { return 0x0000001c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00001220 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC3_ADDR(enum mdp5_pipe i0) { return 0x00000020 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00001224 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_A(enum mdp5_pipe i0) { return 0x00000024 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_A_P0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P0(uint32_t val)
@@ -498,7 +561,7 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_A_P1(uint32_t val)
return ((val) << MDP5_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP5_PIPE_SRC_STRIDE_A_P1__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00001228 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_STRIDE_B(enum mdp5_pipe i0) { return 0x00000028 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
#define MDP5_PIPE_SRC_STRIDE_B_P2__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P2(uint32_t val)
@@ -512,9 +575,9 @@ static inline uint32_t MDP5_PIPE_SRC_STRIDE_B_P3(uint32_t val)
return ((val) << MDP5_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP5_PIPE_SRC_STRIDE_B_P3__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000122c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_STILE_FRAME_SIZE(enum mdp5_pipe i0) { return 0x0000002c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00001230 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_FORMAT(enum mdp5_pipe i0) { return 0x00000030 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
#define MDP5_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_FORMAT_G_BPC(enum mdp_bpc val)
@@ -568,7 +631,7 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(enum mdp5_chroma_samp_ty
return ((val) << MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__SHIFT) & MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00001234 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_UNPACK(enum mdp5_pipe i0) { return 0x00000034 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
#define MDP5_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
@@ -594,7 +657,7 @@ static inline uint32_t MDP5_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
return ((val) << MDP5_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP5_PIPE_SRC_UNPACK_ELEM3__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00001238 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_OP_MODE(enum mdp5_pipe i0) { return 0x00000038 + __offset_PIPE(i0); }
#define MDP5_PIPE_SRC_OP_MODE_BWC_EN 0x00000001
#define MDP5_PIPE_SRC_OP_MODE_BWC__MASK 0x00000006
#define MDP5_PIPE_SRC_OP_MODE_BWC__SHIFT 1
@@ -610,29 +673,29 @@ static inline uint32_t MDP5_PIPE_SRC_OP_MODE_BWC(enum mdp5_pipe_bwc val)
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE 0x00400000
#define MDP5_PIPE_SRC_OP_MODE_DEINTERLACE_ODD 0x00800000
-static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000123c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_CONSTANT_COLOR(enum mdp5_pipe i0) { return 0x0000003c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00001248 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_FETCH_CONFIG(enum mdp5_pipe i0) { return 0x00000048 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000124c + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_VC1_RANGE(enum mdp5_pipe i0) { return 0x0000004c + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00001250 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(enum mdp5_pipe i0) { return 0x00000050 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00001254 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(enum mdp5_pipe i0) { return 0x00000054 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00001258 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(enum mdp5_pipe i0) { return 0x00000058 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00001270 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(enum mdp5_pipe i0) { return 0x00000070 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000012a4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC0_ADDR(enum mdp5_pipe i0) { return 0x000000a4 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000012a8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC1_ADDR(enum mdp5_pipe i0) { return 0x000000a8 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000012ac + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC2_ADDR(enum mdp5_pipe i0) { return 0x000000ac + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000012b0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_CURRENT_SRC3_ADDR(enum mdp5_pipe i0) { return 0x000000b0 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000012b4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_DECIMATION(enum mdp5_pipe i0) { return 0x000000b4 + __offset_PIPE(i0); }
#define MDP5_PIPE_DECIMATION_VERT__MASK 0x000000ff
#define MDP5_PIPE_DECIMATION_VERT__SHIFT 0
static inline uint32_t MDP5_PIPE_DECIMATION_VERT(uint32_t val)
@@ -646,7 +709,7 @@ static inline uint32_t MDP5_PIPE_DECIMATION_HORZ(uint32_t val)
return ((val) << MDP5_PIPE_DECIMATION_HORZ__SHIFT) & MDP5_PIPE_DECIMATION_HORZ__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00001404 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_CONFIG(enum mdp5_pipe i0) { return 0x00000204 + __offset_PIPE(i0); }
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_EN 0x00000001
#define MDP5_PIPE_SCALE_CONFIG_SCALEY_EN 0x00000002
#define MDP5_PIPE_SCALE_CONFIG_SCALEX_MIN_FILTER__MASK 0x00000300
@@ -686,23 +749,34 @@ static inline uint32_t MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(enum mdp5_scale_
return ((val) << MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__SHIFT) & MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER__MASK;
}
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00001410 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_X(enum mdp5_pipe i0) { return 0x00000210 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00001414 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(enum mdp5_pipe i0) { return 0x00000214 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00001420 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_X(enum mdp5_pipe i0) { return 0x00000220 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00001424 + 0x400*i0; }
+static inline uint32_t REG_MDP5_PIPE_SCALE_INIT_PHASE_Y(enum mdp5_pipe i0) { return 0x00000224 + __offset_PIPE(i0); }
-static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+static inline uint32_t __offset_LM(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->lm.base[0]);
+ case 1: return (mdp5_cfg->lm.base[1]);
+ case 2: return (mdp5_cfg->lm.base[2]);
+ case 3: return (mdp5_cfg->lm.base[3]);
+ case 4: return (mdp5_cfg->lm.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_LM(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00003200 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000000 + __offset_LM(i0); }
#define MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA 0x00000002
#define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004
#define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008
#define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010
-static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00003204 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); }
#define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000
#define MDP5_LM_OUT_SIZE_HEIGHT__SHIFT 16
static inline uint32_t MDP5_LM_OUT_SIZE_HEIGHT(uint32_t val)
@@ -716,13 +790,13 @@ static inline uint32_t MDP5_LM_OUT_SIZE_WIDTH(uint32_t val)
return ((val) << MDP5_LM_OUT_SIZE_WIDTH__SHIFT) & MDP5_LM_OUT_SIZE_WIDTH__MASK;
}
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00003208 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_0(uint32_t i0) { return 0x00000008 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00003210 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_BORDER_COLOR_1(uint32_t i0) { return 0x00000010 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00003220 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_OP_MODE(uint32_t i0, uint32_t i1) { return 0x00000020 + __offset_LM(i0) + 0x30*i1; }
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__MASK 0x00000003
#define MDP5_LM_BLEND_OP_MODE_FG_ALPHA__SHIFT 0
static inline uint32_t MDP5_LM_BLEND_OP_MODE_FG_ALPHA(enum mdp_alpha_type val)
@@ -744,57 +818,67 @@ static inline uint32_t MDP5_LM_BLEND_OP_MODE_BG_ALPHA(enum mdp_alpha_type val)
#define MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA 0x00001000
#define MDP5_LM_BLEND_OP_MODE_BG_TRANSP_EN 0x00002000
-static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003224 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000024 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00003228 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000028 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000322c + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000002c + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003230 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000030 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003234 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000034 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003238 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_FG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000038 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000323c + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000003c + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00003240 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000040 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00003244 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000044 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00003248 + 0x400*i0 + 0x30*i1; }
+static inline uint32_t REG_MDP5_LM_BLEND_BG_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000048 + __offset_LM(i0) + 0x30*i1; }
-static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000032e0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_IMG_SIZE(uint32_t i0) { return 0x000000e0 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000032e4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_SIZE(uint32_t i0) { return 0x000000e4 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000032e8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_XY(uint32_t i0) { return 0x000000e8 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000032dc + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_STRIDE(uint32_t i0) { return 0x000000dc + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000032ec + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_FORMAT(uint32_t i0) { return 0x000000ec + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000032f0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BASE_ADDR(uint32_t i0) { return 0x000000f0 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000032f4 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_START_XY(uint32_t i0) { return 0x000000f4 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000032f8 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_CONFIG(uint32_t i0) { return 0x000000f8 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000032fc + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_PARAM(uint32_t i0) { return 0x000000fc + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00003300 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW0(uint32_t i0) { return 0x00000100 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00003304 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_LOW1(uint32_t i0) { return 0x00000104 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00003308 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH0(uint32_t i0) { return 0x00000108 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000330c + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_CURSOR_BLEND_TRANSP_HIGH1(uint32_t i0) { return 0x0000010c + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00003310 + 0x400*i0; }
+static inline uint32_t REG_MDP5_LM_GC_LUT_BASE(uint32_t i0) { return 0x00000110 + __offset_LM(i0); }
-static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+static inline uint32_t __offset_DSPP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->dspp.base[0]);
+ case 1: return (mdp5_cfg->dspp.base[1]);
+ case 2: return (mdp5_cfg->dspp.base[2]);
+ case 3: return (mdp5_cfg->dspp.base[3]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_DSPP(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00004600 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_OP_MODE(uint32_t i0) { return 0x00000000 + __offset_DSPP(i0); }
#define MDP5_DSPP_OP_MODE_IGC_LUT_EN 0x00000001
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__MASK 0x0000000e
#define MDP5_DSPP_OP_MODE_IGC_TBL_IDX__SHIFT 1
@@ -811,29 +895,40 @@ static inline uint32_t MDP5_DSPP_OP_MODE_IGC_TBL_IDX(uint32_t val)
#define MDP5_DSPP_OP_MODE_GAMUT_EN 0x00800000
#define MDP5_DSPP_OP_MODE_GAMUT_ORDER 0x01000000
-static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00004630 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_PCC_BASE(uint32_t i0) { return 0x00000030 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00004750 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_DITHER_DEPTH(uint32_t i0) { return 0x00000150 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00004810 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_CTL_BASE(uint32_t i0) { return 0x00000210 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00004830 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_BASE(uint32_t i0) { return 0x00000230 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00004834 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_HIST_LUT_SWAP(uint32_t i0) { return 0x00000234 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00004838 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_PA_BASE(uint32_t i0) { return 0x00000238 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000048dc + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_GAMUT_BASE(uint32_t i0) { return 0x000002dc + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000048b0 + 0x400*i0; }
+static inline uint32_t REG_MDP5_DSPP_GC_BASE(uint32_t i0) { return 0x000002b0 + __offset_DSPP(i0); }
-static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+static inline uint32_t __offset_INTF(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->intf.base[0]);
+ case 1: return (mdp5_cfg->intf.base[1]);
+ case 2: return (mdp5_cfg->intf.base[2]);
+ case 3: return (mdp5_cfg->intf.base[3]);
+ case 4: return (mdp5_cfg->intf.base[4]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_INTF(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00012500 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TIMING_ENGINE_EN(uint32_t i0) { return 0x00000000 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00012504 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_CONFIG(uint32_t i0) { return 0x00000004 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00012508 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_HSYNC_CTL(uint32_t i0) { return 0x00000008 + __offset_INTF(i0); }
#define MDP5_INTF_HSYNC_CTL_PULSEW__MASK 0x0000ffff
#define MDP5_INTF_HSYNC_CTL_PULSEW__SHIFT 0
static inline uint32_t MDP5_INTF_HSYNC_CTL_PULSEW(uint32_t val)
@@ -847,23 +942,23 @@ static inline uint32_t MDP5_INTF_HSYNC_CTL_PERIOD(uint32_t val)
return ((val) << MDP5_INTF_HSYNC_CTL_PERIOD__SHIFT) & MDP5_INTF_HSYNC_CTL_PERIOD__MASK;
}
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0001250c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F0(uint32_t i0) { return 0x0000000c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00012510 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_PERIOD_F1(uint32_t i0) { return 0x00000010 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00012514 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F0(uint32_t i0) { return 0x00000014 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00012518 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_VSYNC_LEN_F1(uint32_t i0) { return 0x00000018 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0001251c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F0(uint32_t i0) { return 0x0000001c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00012520 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VSTART_F1(uint32_t i0) { return 0x00000020 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00012524 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F0(uint32_t i0) { return 0x00000024 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00012528 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_VEND_F1(uint32_t i0) { return 0x00000028 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0001252c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F0(uint32_t i0) { return 0x0000002c + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F0_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
@@ -872,7 +967,7 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F0_VAL(uint32_t val)
}
#define MDP5_INTF_ACTIVE_VSTART_F0_ACTIVE_V_ENABLE 0x80000000
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00012530 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VSTART_F1(uint32_t i0) { return 0x00000030 + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK 0x7fffffff
#define MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
@@ -880,11 +975,11 @@ static inline uint32_t MDP5_INTF_ACTIVE_VSTART_F1_VAL(uint32_t val)
return ((val) << MDP5_INTF_ACTIVE_VSTART_F1_VAL__SHIFT) & MDP5_INTF_ACTIVE_VSTART_F1_VAL__MASK;
}
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00012534 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F0(uint32_t i0) { return 0x00000034 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00012538 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_VEND_F1(uint32_t i0) { return 0x00000038 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0001253c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DISPLAY_HCTL(uint32_t i0) { return 0x0000003c + __offset_INTF(i0); }
#define MDP5_INTF_DISPLAY_HCTL_START__MASK 0x0000ffff
#define MDP5_INTF_DISPLAY_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_DISPLAY_HCTL_START(uint32_t val)
@@ -898,7 +993,7 @@ static inline uint32_t MDP5_INTF_DISPLAY_HCTL_END(uint32_t val)
return ((val) << MDP5_INTF_DISPLAY_HCTL_END__SHIFT) & MDP5_INTF_DISPLAY_HCTL_END__MASK;
}
-static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00012540 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_ACTIVE_HCTL(uint32_t i0) { return 0x00000040 + __offset_INTF(i0); }
#define MDP5_INTF_ACTIVE_HCTL_START__MASK 0x00007fff
#define MDP5_INTF_ACTIVE_HCTL_START__SHIFT 0
static inline uint32_t MDP5_INTF_ACTIVE_HCTL_START(uint32_t val)
@@ -913,124 +1008,132 @@ static inline uint32_t MDP5_INTF_ACTIVE_HCTL_END(uint32_t val)
}
#define MDP5_INTF_ACTIVE_HCTL_ACTIVE_H_ENABLE 0x80000000
-static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00012544 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_BORDER_COLOR(uint32_t i0) { return 0x00000044 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00012548 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_UNDERFLOW_COLOR(uint32_t i0) { return 0x00000048 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0001254c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_HSYNC_SKEW(uint32_t i0) { return 0x0000004c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00012550 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_POLARITY_CTL(uint32_t i0) { return 0x00000050 + __offset_INTF(i0); }
#define MDP5_INTF_POLARITY_CTL_HSYNC_LOW 0x00000001
#define MDP5_INTF_POLARITY_CTL_VSYNC_LOW 0x00000002
#define MDP5_INTF_POLARITY_CTL_DATA_EN_LOW 0x00000004
-static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00012554 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TEST_CTL(uint32_t i0) { return 0x00000054 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00012558 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TP_COLOR0(uint32_t i0) { return 0x00000058 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0001255c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TP_COLOR1(uint32_t i0) { return 0x0000005c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00012584 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DSI_CMD_MODE_TRIGGER_EN(uint32_t i0) { return 0x00000084 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00012590 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_PANEL_FORMAT(uint32_t i0) { return 0x00000090 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000125a8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_FRAME_LINE_COUNT_EN(uint32_t i0) { return 0x000000a8 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000125ac + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_FRAME_COUNT(uint32_t i0) { return 0x000000ac + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000125b0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_LINE_COUNT(uint32_t i0) { return 0x000000b0 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000125f0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_CONFIG(uint32_t i0) { return 0x000000f0 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000125f4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_STRNG_COEFF(uint32_t i0) { return 0x000000f4 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000125f8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_DEFLICKER_WEAK_COEFF(uint32_t i0) { return 0x000000f8 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00012600 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_ENABLE(uint32_t i0) { return 0x00000100 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00012604 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_MAIN_CONTROL(uint32_t i0) { return 0x00000104 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00012608 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_VIDEO_CONFIG(uint32_t i0) { return 0x00000108 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0001260c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_COMPONENT_LIMITS(uint32_t i0) { return 0x0000010c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00012610 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_RECTANGLE(uint32_t i0) { return 0x00000110 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00012614 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_INITIAL_VALUE(uint32_t i0) { return 0x00000114 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00012618 + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_BLK_WHITE_PATTERN_FRAME(uint32_t i0) { return 0x00000118 + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0001261c + 0x200*i0; }
+static inline uint32_t REG_MDP5_INTF_TPG_RGB_MAPPING(uint32_t i0) { return 0x0000011c + __offset_INTF(i0); }
-static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+static inline uint32_t __offset_AD(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return (mdp5_cfg->ad.base[0]);
+ case 1: return (mdp5_cfg->ad.base[1]);
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP5_AD(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00013100 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BYPASS(uint32_t i0) { return 0x00000000 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00013104 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CTRL_0(uint32_t i0) { return 0x00000004 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00013108 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CTRL_1(uint32_t i0) { return 0x00000008 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0001310c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_FRAME_SIZE(uint32_t i0) { return 0x0000000c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00013110 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CON_CTRL_0(uint32_t i0) { return 0x00000010 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00013114 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CON_CTRL_1(uint32_t i0) { return 0x00000014 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00013118 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_MAN(uint32_t i0) { return 0x00000018 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0001311c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_VAR(uint32_t i0) { return 0x0000001c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00013120 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_DITH(uint32_t i0) { return 0x00000020 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00013124 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_DITH_CTRL(uint32_t i0) { return 0x00000024 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00013128 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AMP_LIM(uint32_t i0) { return 0x00000028 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0001312c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_SLOPE(uint32_t i0) { return 0x0000002c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00013130 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BW_LVL(uint32_t i0) { return 0x00000030 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00013134 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LOGO_POS(uint32_t i0) { return 0x00000034 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00013138 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_FI(uint32_t i0) { return 0x00000038 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0001317c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_CC(uint32_t i0) { return 0x0000007c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000131c8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_LIM(uint32_t i0) { return 0x000000c8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000131cc + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALIB_AB(uint32_t i0) { return 0x000000cc + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000131d0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALIB_CD(uint32_t i0) { return 0x000000d0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000131d4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_MODE_SEL(uint32_t i0) { return 0x000000d4 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000131d8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_TFILT_CTRL(uint32_t i0) { return 0x000000d8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000131dc + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_MINMAX(uint32_t i0) { return 0x000000dc + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000131e0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL(uint32_t i0) { return 0x000000e0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000131e8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_MAX(uint32_t i0) { return 0x000000e8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000131ec + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL(uint32_t i0) { return 0x000000ec + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000131f0 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL_MIN(uint32_t i0) { return 0x000000f0 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000131f4 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_AL_FILT(uint32_t i0) { return 0x000000f4 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000131f8 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CFG_BUF(uint32_t i0) { return 0x000000f8 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00013200 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_LUT_AL(uint32_t i0) { return 0x00000100 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00013244 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_TARG_STR(uint32_t i0) { return 0x00000144 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00013248 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_START_CALC(uint32_t i0) { return 0x00000148 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0001324c + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_STR_OUT(uint32_t i0) { return 0x0000014c + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00013254 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_BL_OUT(uint32_t i0) { return 0x00000154 + __offset_AD(i0); }
-static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00013258 + 0x200*i0; }
+static inline uint32_t REG_MDP5_AD_CALC_DONE(uint32_t i0) { return 0x00000158 + __offset_AD(i0); }
#endif /* MDP5_XML */
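
The mechanical change running through this whole regenerated header: every REG_MDP5_* helper that used to hard-code a block base plus a fixed 0x400/0x200 stride now asks a per-SoC table, reached through the global mdp5_cfg pointer, where the block actually lives. A minimal standalone sketch of that lookup shape, with made-up base addresses (the real tables are defined in mdp5_kms.c below):

#include <stdint.h>

struct sub_block {
	int count;
	uint32_t base[8];
};

/* hypothetical layer-mixer table; the real one is filled in per SoC */
static const struct sub_block lm_cfg = {
	.count = 2,
	.base = { 0x3200, 0x3600 },
};

static inline uint32_t offset_lm(uint32_t idx)
{
	/* bounds come from the table, not from a fixed stride */
	return (idx < (uint32_t)lm_cfg.count) ? lm_cfg.base[idx] : 0;
}

/* register address = per-SoC block base + fixed in-block offset */
static inline uint32_t reg_lm_out_size(uint32_t i0)
{
	return 0x00000004 + offset_lm(i0);
}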
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 71510ee26e96..31a2c6331a1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -26,14 +26,98 @@ static const char *iommu_ports[] = {
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
-static int mdp5_hw_init(struct msm_kms *kms)
+const struct mdp5_config *mdp5_cfg;
+
+static const struct mdp5_config msm8x74_config = {
+ .name = "msm8x74",
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 3,
+ .base = { 0x01200, 0x01600, 0x01a00 },
+ },
+ .pipe_rgb = {
+ .count = 3,
+ .base = { 0x01e00, 0x02200, 0x02600 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x02a00, 0x02e00 },
+ },
+ .lm = {
+ .count = 5,
+ .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
+ },
+ .dspp = {
+ .count = 3,
+ .base = { 0x04600, 0x04a00, 0x04e00 },
+ },
+ .ad = {
+ .count = 2,
+ .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
+ },
+ .intf = {
+ .count = 4,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
+ },
+};
+
+static const struct mdp5_config apq8084_config = {
+ .name = "apq8084",
+ .ctl = {
+ .count = 5,
+ .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
+ },
+ .pipe_vig = {
+ .count = 4,
+ .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
+ },
+ .pipe_rgb = {
+ .count = 4,
+ .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
+ },
+ .pipe_dma = {
+ .count = 2,
+ .base = { 0x03200, 0x03600 },
+ },
+ .lm = {
+ .count = 6,
+ .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
+ },
+ .dspp = {
+ .count = 4,
+ .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
+ },
+ .ad = {
+ .count = 3,
+ .base = { 0x13500, 0x13700, 0x13900 },
+ },
+ .intf = {
+ .count = 5,
+ .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
+ },
+};
+
+struct mdp5_config_entry {
+ int revision;
+ const struct mdp5_config *config;
+};
+
+static const struct mdp5_config_entry mdp5_configs[] = {
+ { .revision = 0, .config = &msm8x74_config },
+ { .revision = 2, .config = &msm8x74_config },
+ { .revision = 3, .config = &apq8084_config },
+};
+
+static int mdp5_select_hw_cfg(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct drm_device *dev = mdp5_kms->dev;
uint32_t version, major, minor;
- int ret = 0;
-
- pm_runtime_get_sync(dev->dev);
+ int i, ret = 0;
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
@@ -44,8 +128,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
DBG("found MDP5 version v%d.%d", major, minor);
- if ((major != 1) || ((minor != 0) && (minor != 2))) {
- dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ if (major != 1) {
+ dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@@ -53,6 +137,35 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_kms->rev = minor;
+	/* the hw can only be accessed once the global mdp5_cfg pointer is set */
+ for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
+ if (mdp5_configs[i].revision != minor)
+ continue;
+ mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
+ break;
+ }
+ if (unlikely(!mdp5_kms->hw_cfg)) {
+ dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int mdp5_hw_init(struct msm_kms *kms)
+{
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+ struct drm_device *dev = mdp5_kms->dev;
+ int i;
+
+ pm_runtime_get_sync(dev->dev);
+
/* Magic unknown register writes:
*
* W VBIF:0x004 00000001 (mdss_mdp.c:839)
@@ -78,15 +191,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
*/
mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(3), 0);
-out:
+ for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
+ mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
+
pm_runtime_put_sync(dev->dev);
- return ret;
+ return 0;
}
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
@@ -161,7 +272,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
static int modeset_init(struct mdp5_kms *mdp5_kms)
{
static const enum mdp5_pipe crtcs[] = {
- SSPP_RGB0, SSPP_RGB1, SSPP_RGB2,
+ SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
};
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -169,7 +280,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
int i, ret;
/* construct CRTCs: */
- for (i = 0; i < ARRAY_SIZE(crtcs); i++) {
+ for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
struct drm_plane *plane;
struct drm_crtc *crtc;
@@ -246,7 +357,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
struct msm_mmu *mmu;
- int ret;
+ int i, ret;
mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
if (!mdp5_kms) {
@@ -307,20 +418,22 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
+ ret = mdp5_select_hw_cfg(kms);
+ if (ret)
+ goto fail;
+
/* make sure things are off before attaching iommu (bootloader could
* have left things on, in which case we'll start getting faults if
* we don't disable):
*/
mdp5_enable(mdp5_kms);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(0), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(1), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(2), 0);
- mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(3), 0);
+ for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
+ mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
mdp5_disable(mdp5_kms);
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(dev, config->iommu);
+ mmu = msm_iommu_new(&pdev->dev, config->iommu);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -368,5 +481,11 @@ static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
#ifdef CONFIG_OF
/* TODO */
#endif
+ config.iommu = iommu_domain_alloc(&platform_bus_type);
+ /* TODO hard-coded in downstream mdss, but should it be? */
+ config.max_clk = 200000000;
+ /* TODO get from DT: */
+ config.smp_blk_cnt = 22;
+
return &config;
}
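
mdp5_select_hw_cfg() above keys everything on the minor version read from REG_MDP5_MDP_VERSION: the same major (1) with different minors maps to different base-address tables. The select-by-revision idiom in isolation, with hypothetical names and revisions:

#include <stddef.h>

struct hw_cfg {
	const char *name;
};

static const struct hw_cfg rev0_cfg = { .name = "msm8x74-like" };
static const struct hw_cfg rev3_cfg = { .name = "apq8084-like" };

static const struct {
	int revision;
	const struct hw_cfg *cfg;
} cfg_table[] = {
	{ .revision = 0, .cfg = &rev0_cfg },
	{ .revision = 3, .cfg = &rev3_cfg },
};

/* the minor revision comes from the version register read at init */
static const struct hw_cfg *select_cfg(int minor)
{
	size_t i;

	for (i = 0; i < sizeof(cfg_table) / sizeof(cfg_table[0]); i++)
		if (cfg_table[i].revision == minor)
			return cfg_table[i].cfg;
	return NULL;	/* unknown minor: caller bails out with -ENXIO */
}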
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 6e981b692d1d..5bf340dd0f00 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,6 +21,24 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "mdp/mdp_kms.h"
+/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */
+#define MDP5_MAX_BASES 8
+struct mdp5_sub_block {
+ int count;
+ uint32_t base[MDP5_MAX_BASES];
+};
+struct mdp5_config {
+ char *name;
+ struct mdp5_sub_block ctl;
+ struct mdp5_sub_block pipe_vig;
+ struct mdp5_sub_block pipe_rgb;
+ struct mdp5_sub_block pipe_dma;
+ struct mdp5_sub_block lm;
+ struct mdp5_sub_block dspp;
+ struct mdp5_sub_block ad;
+ struct mdp5_sub_block intf;
+};
+extern const struct mdp5_config *mdp5_cfg;
#include "mdp5.xml.h"
#include "mdp5_smp.h"
@@ -30,6 +48,7 @@ struct mdp5_kms {
struct drm_device *dev;
int rev;
+ const struct mdp5_config *hw_cfg;
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
@@ -82,6 +101,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
NAME(VIG0), NAME(VIG1), NAME(VIG2),
NAME(RGB0), NAME(RGB1), NAME(RGB2),
NAME(DMA0), NAME(DMA1),
+ NAME(VIG3), NAME(RGB3),
#undef NAME
};
return names[pipe];
@@ -98,6 +118,8 @@ static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
+ case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
+ case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
default: return 0;
}
}
@@ -108,6 +130,7 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
case SSPP_RGB0:
case SSPP_RGB1:
case SSPP_RGB2:
+ case SSPP_RGB3:
return 1;
default:
return 3;
@@ -126,6 +149,8 @@ static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
case SSPP_RGB2: return CID_RGB2;
case SSPP_DMA0: return CID_DMA0_Y + plane;
case SSPP_DMA1: return CID_DMA1_Y + plane;
+ case SSPP_VIG3: return CID_VIG3_Y + plane;
+ case SSPP_RGB3: return CID_RGB3;
default: return CID_UNUSED;
}
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index a9629b85b983..64c1afd6030a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -12,12 +12,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2013-12-03 20:59:13)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 20932 bytes, from 2013-12-01 15:13:04)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44)
Copyright (C) 2013 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9a5d87db5c23..b447c01ad89c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -181,7 +181,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
struct msm_kms *kms;
int ret;
-
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(dev->dev, "failed to allocate private data\n");
@@ -314,13 +313,15 @@ fail:
static void load_gpu(struct drm_device *dev)
{
+ static DEFINE_MUTEX(init_lock);
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu;
+ mutex_lock(&init_lock);
+
if (priv->gpu)
- return;
+ goto out;
- mutex_lock(&dev->struct_mutex);
gpu = a3xx_gpu_init(dev);
if (IS_ERR(gpu)) {
dev_warn(dev->dev, "failed to load a3xx gpu\n");
@@ -330,7 +331,9 @@ static void load_gpu(struct drm_device *dev)
if (gpu) {
int ret;
+ mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
+ mutex_unlock(&dev->struct_mutex);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
@@ -340,12 +343,12 @@ static void load_gpu(struct drm_device *dev)
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
-
}
priv->gpu = gpu;
- mutex_unlock(&dev->struct_mutex);
+out:
+ mutex_unlock(&init_lock);
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
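
The load_gpu() rework above swaps dev->struct_mutex for a function-local init_lock, so first-open GPU bring-up is serialized without holding the DRM lock across hw_init. The once-only pattern it relies on, as a userspace sketch (a pthread mutex standing in for the kernel's function-local DEFINE_MUTEX()):

#include <pthread.h>
#include <stdlib.h>

static void *gpu;	/* lazily-initialized singleton, like priv->gpu */

static void load_gpu(void)
{
	static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&init_lock);
	if (!gpu)				/* only the first caller initializes */
		gpu = malloc(4096);		/* stand-in for a3xx_gpu_init() */
	pthread_mutex_unlock(&init_lock);
}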
@@ -906,25 +909,22 @@ static int compare_of(struct device *dev, void *data)
return dev->of_node == data;
}
-static int msm_drm_add_components(struct device *master, struct master *m)
+static int add_components(struct device *dev, struct component_match **matchptr,
+ const char *name)
{
- struct device_node *np = master->of_node;
+ struct device_node *np = dev->of_node;
unsigned i;
- int ret;
for (i = 0; ; i++) {
struct device_node *node;
- node = of_parse_phandle(np, "connectors", i);
+ node = of_parse_phandle(np, name, i);
if (!node)
break;
- ret = component_master_add_child(m, compare_of, node);
- of_node_put(node);
-
- if (ret)
- return ret;
+ component_match_add(dev, matchptr, compare_of, node);
}
+
return 0;
}
#else
@@ -932,9 +932,34 @@ static int compare_dev(struct device *dev, void *data)
{
return dev == data;
}
+#endif
+
+static int msm_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&msm_driver, to_platform_device(dev));
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
+};
+
+/*
+ * Platform driver:
+ */
-static int msm_drm_add_components(struct device *master, struct master *m)
+static int msm_pdev_probe(struct platform_device *pdev)
{
+ struct component_match *match = NULL;
+#ifdef CONFIG_OF
+ add_components(&pdev->dev, &match, "connectors");
+ add_components(&pdev->dev, &match, "gpus");
+#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
* they are simply not present). But for non-DT we only need to
@@ -958,41 +983,12 @@ static int msm_drm_add_components(struct device *master, struct master *m)
return -EPROBE_DEFER;
}
- ret = component_master_add_child(m, compare_dev, dev);
- if (ret) {
- DBG("could not add child: %d", ret);
- return ret;
- }
+ component_match_add(&pdev->dev, &match, compare_dev, dev);
}
-
- return 0;
-}
#endif
-static int msm_drm_bind(struct device *dev)
-{
- return drm_platform_init(&msm_driver, to_platform_device(dev));
-}
-
-static void msm_drm_unbind(struct device *dev)
-{
- drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
-}
-
-static const struct component_master_ops msm_drm_ops = {
- .add_components = msm_drm_add_components,
- .bind = msm_drm_bind,
- .unbind = msm_drm_unbind,
-};
-
-/*
- * Platform driver:
- */
-
-static int msm_pdev_probe(struct platform_device *pdev)
-{
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- return component_master_add(&pdev->dev, &msm_drm_ops);
+ return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
static int msm_pdev_remove(struct platform_device *pdev)
@@ -1008,7 +1004,8 @@ static const struct platform_device_id msm_id[] = {
};
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdss_mdp" },
+ { .compatible = "qcom,mdp" }, /* mdp4 */
+ { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
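
The probe path now builds a component_match list up front and registers the master with it, instead of enumerating children from the removed .add_components hook. A trimmed sketch of the new flow (example_* names are placeholders; error handling is elided, as in the patch):

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}

static int example_bind(struct device *dev) { return 0; }
static void example_unbind(struct device *dev) { }

static const struct component_master_ops example_ops = {
	.bind = example_bind,	/* called once every matched component probed */
	.unbind = example_unbind,
};

static int example_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
	struct device_node *node;
	unsigned int i;

	/* one match entry per phandle in the "connectors" property */
	for (i = 0; (node = of_parse_phandle(pdev->dev.of_node,
					     "connectors", i)); i++)
		component_match_add(&pdev->dev, &match, compare_of, node);

	return component_master_add_with_match(&pdev->dev, &example_ops, match);
}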
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 5107fc4826bc..9c5221ce391a 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -19,6 +19,11 @@
#include "drm_crtc.h"
#include "drm_fb_helper.h"
+#include "msm_gem.h"
+
+extern int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
/*
* fbdev funcs, to implement legacy fbdev interface on top of drm driver
@@ -43,6 +48,7 @@ static struct fb_ops msm_fb_ops = {
.fb_fillrect = sys_fillrect,
.fb_copyarea = sys_copyarea,
.fb_imageblit = sys_imageblit,
+ .fb_mmap = msm_fbdev_mmap,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
@@ -51,6 +57,31 @@ static struct fb_ops msm_fb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
+static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = (struct drm_fb_helper *)info->par;
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_gem_object *drm_obj = fbdev->bo;
+ struct drm_device *dev = helper->dev;
+ int ret = 0;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ret) {
+		pr_err("%s: drm_gem_mmap_obj failed\n", __func__);
+ return ret;
+ }
+
+ return msm_gem_mmap_obj(drm_obj, vma);
+}
+
static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -104,8 +135,16 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- /* TODO implement our own fb_mmap so we don't need this: */
- msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ /*
+	 * NOTE: if we could be guaranteed to be able to map the buffer
+	 * in panic context (i.e. lock-safe, etc.), we could avoid
+	 * pinning it now:
+ */
+ ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ if (ret) {
+ dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ goto fail;
+ }
fbi = framebuffer_alloc(0, dev->dev);
if (!fbi) {
@@ -177,7 +216,7 @@ static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
DBG("fbdev: get gamma");
}
-static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
.gamma_set = msm_crtc_fb_gamma_set,
.gamma_get = msm_crtc_fb_gamma_get,
.fb_probe = msm_fbdev_create,
@@ -189,7 +228,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_fbdev *fbdev = NULL;
struct drm_fb_helper *helper;
- int ret = 0;
+ int ret;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
@@ -197,7 +236,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
helper = &fbdev->base;
- helper->funcs = &msm_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper,
priv->num_crtcs, priv->num_connectors);
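
The fbdev conversions in this patch (msm, nouveau, omapdrm) all follow one recipe: constify the drm_fb_helper_funcs table and let drm_fb_helper_prepare() install it before drm_fb_helper_init(). In outline (example_* names are hypothetical):

#include <drm/drm_fb_helper.h>

static int example_fbdev_create(struct drm_fb_helper *helper,
		struct drm_fb_helper_surface_size *sizes)
{
	return 0;	/* placeholder probe hook */
}

static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
	.fb_probe = example_fbdev_create,
};

static int example_fbdev_init(struct drm_device *dev,
		struct drm_fb_helper *helper)
{
	/* replaces the old open-coded "helper->funcs = &..." assignment */
	drm_fb_helper_prepare(dev, helper, &example_fb_helper_funcs);

	return drm_fb_helper_init(dev, helper, 1 /* crtcs */, 1 /* connectors */);
}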
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 690d7e7b6d1e..4b1b82adabde 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
int npages = obj->size >> PAGE_SHIFT;
if (iommu_present(&platform_bus_type))
- p = drm_gem_get_pages(obj, 0);
+ p = drm_gem_get_pages(obj);
else
p = get_pages_vram(obj, npages);
@@ -278,24 +278,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
uint32_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct drm_device *dev = obj->dev;
int ret = 0;
if (!msm_obj->domain[id].iova) {
struct msm_drm_private *priv = obj->dev->dev_private;
- struct msm_mmu *mmu = priv->mmus[id];
struct page **pages = get_pages(obj);
- if (!mmu) {
- dev_err(dev->dev, "null MMU pointer\n");
- return -EINVAL;
- }
-
if (IS_ERR(pages))
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- uint32_t offset = (uint32_t)mmap_offset(obj);
+ struct msm_mmu *mmu = priv->mmus[id];
+ uint32_t offset;
+
+ if (WARN_ON(!mmu))
+ return -EINVAL;
+
+ offset = (uint32_t)mmap_offset(obj);
ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
obj->size, IOMMU_READ | IOMMU_WRITE);
msm_obj->domain[id].iova = offset;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index c6322197db8c..4a0dce587745 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -606,14 +606,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(drm, iommu);
+ gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
gpu->id = msm_register_mmu(drm, gpu->mmu);
+
/* Create ringbuffer: */
+ mutex_lock(&drm->struct_mutex);
gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+ mutex_unlock(&drm->struct_mutex);
if (IS_ERR(gpu->rb)) {
ret = PTR_ERR(gpu->rb);
gpu->rb = NULL;
@@ -621,13 +624,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
- goto fail;
- }
-
bs_init(gpu);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4b2ad9181edf..099af483fdf0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -33,39 +33,14 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
- struct drm_device *dev = mmu->dev;
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i, ret;
-
- for (i = 0; i < cnt; i++) {
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx)) {
- dev_warn(dev->dev, "couldn't get %s context", names[i]);
- continue;
- }
- ret = iommu_attach_device(iommu->domain, ctx);
- if (ret) {
- dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
- return ret;
- }
- }
-
- return 0;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int i;
-
- for (i = 0; i < cnt; i++) {
- struct device *msm_iommu_get_ctx(const char *ctx_name);
- struct device *ctx = msm_iommu_get_ctx(names[i]);
- if (IS_ERR_OR_NULL(ctx))
- continue;
- iommu_detach_device(iommu->domain, ctx);
- }
+ iommu_detach_device(iommu->domain, mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -149,7 +124,7 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 21da6d154f71..7cd88d9dc155 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -32,17 +32,17 @@ struct msm_mmu_funcs {
struct msm_mmu {
const struct msm_mmu_funcs *funcs;
- struct drm_device *dev;
+ struct device *dev;
};
-static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
const struct msm_mmu_funcs *funcs)
{
mmu->dev = dev;
mmu->funcs = funcs;
}
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
#endif /* __MSM_MMU_H__ */
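
With msm_mmu now carrying a plain struct device, attach and detach collapse onto the generic IOMMU API and the per-context-name loops disappear. The domain lifecycle this enables, sketched (assuming a platform device, as msm uses):

#include <linux/iommu.h>
#include <linux/platform_device.h>

static int example_iommu_setup(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENODEV;	/* no IOMMU: caller falls back to VRAM carveout */

	/* one device, one attach -- no ctx-name lookups */
	ret = iommu_attach_device(domain, dev);
	if (ret)
		iommu_domain_free(domain);
	return ret;
}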
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b6dc85c614be..ba29a701ca1d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -309,7 +309,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
goto out;
@@ -350,7 +350,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
struct ttm_buffer_object *bo = &nvbo->bo;
int ret, ref;
- ret = ttm_bo_reserve(bo, false, false, false, 0);
+ ret = ttm_bo_reserve(bo, false, false, false, NULL);
if (ret)
return ret;
@@ -385,7 +385,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
{
int ret;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 1fa222e8f007..dbdc9ad59546 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -63,7 +63,7 @@ find_encoder(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
- struct drm_mode_object *obj;
+ struct drm_encoder *enc;
int i, id;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
@@ -71,10 +71,10 @@ find_encoder(struct drm_connector *connector, int type)
if (!id)
break;
- obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ enc = drm_encoder_find(dev, id);
+ if (!enc)
continue;
- nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+ nv_encoder = nouveau_encoder(enc);
if (type == DCB_OUTPUT_ANY ||
(nv_encoder->dcb && nv_encoder->dcb->type == type))
@@ -104,7 +104,7 @@ nouveau_connector_destroy(struct drm_connector *connector)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
nouveau_event_ref(NULL, &nv_connector->hpd);
kfree(nv_connector->edid);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
if (nv_connector->aux.transfer)
drm_dp_aux_unregister(&nv_connector->aux);
@@ -119,7 +119,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_encoder *nv_encoder;
- struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
int i, panel = -ENODEV;
/* eDP panels need powering on by us (if the VBIOS doesn't default it
@@ -139,10 +139,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
if (id == 0)
break;
- obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(dev, id);
+ if (!encoder)
continue;
- nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+ nv_encoder = nouveau_encoder(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
int ret = nouveau_dp_detect(nv_encoder);
@@ -1236,6 +1236,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
INIT_WORK(&nv_connector->work, nouveau_connector_hotplug_work);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
}
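
Both nouveau hunks above are the same conversion: drm_encoder_find() replaces the drm_mode_object_find() + obj_to_encoder() pair, folding the type check into the lookup. Sketched:

#include <drm/drm_crtc.h>

/* resolve a connector's i-th encoder id to a typed object */
static struct drm_encoder *example_get_encoder(struct drm_connector *connector,
					       int i)
{
	uint32_t id = connector->encoder_ids[i];

	if (!id)
		return NULL;

	/* object lookup and DRM_MODE_OBJECT_ENCODER check in one call */
	return drm_encoder_find(connector->dev, id);
}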
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 191665ee7f52..758c11cb9a9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -438,7 +438,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
info->flags |= FBINFO_HWACCEL_DISABLED;
}
-static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
+static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.gamma_set = nouveau_fbcon_gamma_set,
.gamma_get = nouveau_fbcon_gamma_get,
.fb_probe = nouveau_fbcon_create,
@@ -464,7 +464,8 @@ nouveau_fbcon_init(struct drm_device *dev)
fbcon->dev = dev;
drm->fbcon = fbcon;
- fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
+
+ drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
ret = drm_fb_helper_init(dev, &fbcon->helper,
dev->mode_config.num_crtc, 4);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c90c0dc0afe8..df9d451afdcd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,7 +61,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->base.vm)
return 0;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
@@ -132,7 +132,7 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!cli->base.vm)
return;
- ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index ab0228f640a5..7e185c122750 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -76,6 +76,7 @@ static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(man->bdev);
@@ -162,6 +163,7 @@ static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -242,6 +244,7 @@ static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct nouveau_mem *node;
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 86f4ead0441d..36bc5cc80816 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -130,7 +130,7 @@ static void omap_connector_destroy(struct drm_connector *connector)
struct omap_dss_device *dssdev = omap_connector->dssdev;
DBG("%s", omap_connector->dssdev->name);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(omap_connector);
@@ -307,7 +307,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f926b4caf449..56c60552abba 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
{
- dma_addr_t pat_pa = 0;
+ dma_addr_t pat_pa = 0, data_pa = 0;
uint32_t *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
@@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
.lut_id = engine->tcm->lut_id,
};
- data = alloc_dma(txn, 4*i, &pat->data_pa);
+ data = alloc_dma(txn, 4*i, &data_pa);
+ /* FIXME: what if data_pa is more than 32 bits? */
+ pat->data_pa = data_pa;
while (i--) {
int n = i + roll;
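
The dmm_txn_append() change above is a real fix, not churn: alloc_dma() writes a full dma_addr_t through its output pointer, and with a 64-bit dma_addr_t (LPAE) passing &pat->data_pa would overwrite whatever follows the 32-bit descriptor field. Routing through a local and assigning afterwards confines the truncation to one explicit store (hence the FIXME). A userspace sketch of the hazard, with an illustrative struct pat layout:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;    /* 64-bit, as on LPAE kernels */

    struct pat {                    /* illustrative descriptor layout */
        uint32_t data_pa;           /* hardware field is only 32 bits */
        uint32_t ctrl;
    };

    static void alloc_dma_stub(dma_addr_t *pa)
    {
        *pa = 0x12345678;           /* stores sizeof(dma_addr_t) == 8 bytes */
    }

    int main(void)
    {
        struct pat pat = { .ctrl = 0xcafe };
        dma_addr_t data_pa = 0;

        /* alloc_dma_stub((dma_addr_t *)&pat.data_pa) would clobber
         * pat.ctrl; go through a local instead, as the patch does. */
        alloc_dma_stub(&data_pa);
        pat.data_pa = data_pa;      /* FIXME in the patch: >32-bit addresses? */

        printf("data_pa=%#x ctrl=%#x\n",
               (unsigned)pat.data_pa, (unsigned)pat.ctrl);
        return 0;
    }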
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 284b80fc3c54..b08a450d1b5d 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -119,13 +119,6 @@ struct omap_drm_private {
struct omap_drm_irq error_handler;
};
-/* this should probably be in drm-core to standardize amongst drivers */
-#define DRM_ROTATE_0 0
-#define DRM_ROTATE_90 1
-#define DRM_ROTATE_180 2
-#define DRM_ROTATE_270 3
-#define DRM_REFLECT_X 4
-#define DRM_REFLECT_Y 5
#ifdef CONFIG_DEBUG_FS
int omap_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 1388ca7f87e8..8436c6857cda 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -281,7 +281,7 @@ fail:
return ret;
}
-static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.fb_probe = omap_fbdev_create,
};
@@ -325,7 +325,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
helper = &fbdev->base;
- helper->funcs = &omap_fb_helper_funcs;
+ drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper,
priv->num_crtcs, priv->num_connectors);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 95dbce286a41..e4849413ee80 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,11 +233,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
WARN_ON(omap_obj->pages);
- /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
- * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
- * we actually want CMA memory for it all anyways..
- */
- pages = drm_gem_get_pages(obj, GFP_KERNEL);
+ pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
return PTR_ERR(pages);
@@ -791,7 +787,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj,
omap_obj->paddr = tiler_ssptr(block);
omap_obj->block = block;
- DBG("got paddr: %08x", omap_obj->paddr);
+ DBG("got paddr: %pad", &omap_obj->paddr);
}
omap_obj->paddr_cnt++;
@@ -985,9 +981,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
off = drm_vma_node_start(&obj->vma_node);
- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
- off, omap_obj->paddr, omap_obj->paddr_cnt,
+ off, &omap_obj->paddr, omap_obj->paddr_cnt,
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
@@ -1183,9 +1179,7 @@ int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
}
}
spin_unlock(&sync_lock);
-
- if (waiter)
- kfree(waiter);
+ kfree(waiter);
}
return ret;
}
@@ -1347,6 +1341,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
struct drm_gem_object *obj = NULL;
+ struct address_space *mapping;
size_t size;
int ret;
@@ -1404,14 +1399,16 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->height = gsize.tiled.height;
}
- ret = 0;
- if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
+ if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
drm_gem_private_object_init(dev, obj, size);
- else
+ } else {
ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
- if (ret)
- goto fail;
+ mapping = file_inode(obj->filp)->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
+ }
return obj;
@@ -1467,8 +1464,8 @@ void omap_gem_init(struct drm_device *dev)
entry->paddr = tiler_ssptr(block);
entry->block = block;
- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
- entry->paddr,
+ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
+ &entry->paddr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
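
The omap_gem.c print changes switch dma_addr_t values from "%08x" (and the nonstandard "%08Zx") to the kernel's "%pad" specifier, which takes a pointer to the dma_addr_t and prints it at full width whether the type is 32 or 64 bits; passing a 64-bit value into a "%x" varargs slot is undefined behaviour and at best truncates. Userspace printf has no %pad; the closest portable rendering of the same idea:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;    /* may be 32 or 64 bits in the kernel */

    int main(void)
    {
        dma_addr_t paddr = 0x1ffff0000ULL;  /* above 4 GiB: needs 64 bits */

        /* What the old "%08x" code effectively printed: */
        printf("old: %08x\n", (unsigned int)paddr);

        /* What "%pad" prints (full-width value; the kernel takes a
         * pointer, shown here with PRIx64 on the value instead): */
        printf("new: %#" PRIx64 "\n", (uint64_t)paddr);
        return 0;
    }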
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 3cf31ee59aac..891a4dc608af 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply)
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
info->out_width, info->out_height,
info->screen_width);
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
- info->paddr, info->p_uv_addr);
+ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
+ &info->paddr, &info->p_uv_addr);
/* TODO: */
ilace = false;
@@ -308,16 +308,13 @@ void omap_plane_install_properties(struct drm_plane *plane,
if (priv->has_dmm) {
prop = priv->rotation_prop;
if (!prop) {
- const struct drm_prop_enum_list props[] = {
- { DRM_ROTATE_0, "rotate-0" },
- { DRM_ROTATE_90, "rotate-90" },
- { DRM_ROTATE_180, "rotate-180" },
- { DRM_ROTATE_270, "rotate-270" },
- { DRM_REFLECT_X, "reflect-x" },
- { DRM_REFLECT_Y, "reflect-y" },
- };
- prop = drm_property_create_bitmask(dev, 0, "rotation",
- props, ARRAY_SIZE(props));
+ prop = drm_mode_create_rotation_property(dev,
+ BIT(DRM_ROTATE_0) |
+ BIT(DRM_ROTATE_90) |
+ BIT(DRM_ROTATE_180) |
+ BIT(DRM_ROTATE_270) |
+ BIT(DRM_REFLECT_X) |
+ BIT(DRM_REFLECT_Y));
if (prop == NULL)
return;
priv->rotation_prop = prop;
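
drm_mode_create_rotation_property() was added to the core in this same series precisely so drivers stop duplicating the enum list; the DRM_ROTATE_*/DRM_REFLECT_* defines removed from omap_drv.h above now live in the core headers as bit numbers. In outline, the core helper builds the same bitmask property the driver used to assemble by hand, masked to the rotations the caller supports:

    struct drm_property *
    drm_mode_create_rotation_property(struct drm_device *dev,
                                      unsigned int supported_rotations)
    {
        static const struct drm_prop_enum_list props[] = {
            { DRM_ROTATE_0,   "rotate-0" },
            { DRM_ROTATE_90,  "rotate-90" },
            { DRM_ROTATE_180, "rotate-180" },
            { DRM_ROTATE_270, "rotate-270" },
            { DRM_REFLECT_X,  "reflect-x" },
            { DRM_REFLECT_Y,  "reflect-y" },
        };

        return drm_property_create_bitmask(dev, 0, "rotation",
                                           props, ARRAY_SIZE(props),
                                           supported_rotations);
    }

The BIT() mask passed by omap_plane.c above selects which of these values userspace may set.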
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 4ec874da5668..bee9f72b3a93 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -5,7 +5,7 @@ config DRM_PANEL
Panel registration and lookup framework.
menu "Display Panels"
- depends on DRM_PANEL
+ depends on DRM && DRM_PANEL
config DRM_PANEL_SIMPLE
tristate "support for simple panels"
@@ -18,14 +18,11 @@ config DRM_PANEL_SIMPLE
config DRM_PANEL_LD9040
tristate "LD9040 RGB/SPI panel"
- depends on DRM && DRM_PANEL
- depends on OF
- select SPI
+ depends on OF && SPI
select VIDEOMODE_HELPERS
config DRM_PANEL_S6E8AA0
tristate "S6E8AA0 DSI video mode panel"
- depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
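
Two Kconfig rules drive the simplification above: a menu's dependencies are inherited by every entry inside it, so once the "Display Panels" menu itself depends on DRM && DRM_PANEL, the per-entry "depends on DRM && DRM_PANEL" lines are redundant and can go. And "select SPI" is downgraded to "depends on OF && SPI" because select forces a symbol on regardless of that symbol's own dependencies, which is acceptable for small helper libraries (VIDEOMODE_HELPERS, DRM_MIPI_DSI) but not for a whole subsystem like SPI.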
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index db1601fdbe29..42ac67b21e9f 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -110,7 +110,10 @@ struct ld9040 {
int error;
};
-#define panel_to_ld9040(p) container_of(p, struct ld9040, panel)
+static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
+{
+ return container_of(panel, struct ld9040, panel);
+}
static int ld9040_clear_error(struct ld9040 *ctx)
{
@@ -216,6 +219,11 @@ static int ld9040_power_off(struct ld9040 *ctx)
static int ld9040_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int ld9040_unprepare(struct drm_panel *panel)
+{
struct ld9040 *ctx = panel_to_ld9040(panel);
msleep(120);
@@ -228,7 +236,7 @@ static int ld9040_disable(struct drm_panel *panel)
return ld9040_power_off(ctx);
}
-static int ld9040_enable(struct drm_panel *panel)
+static int ld9040_prepare(struct drm_panel *panel)
{
struct ld9040 *ctx = panel_to_ld9040(panel);
int ret;
@@ -242,11 +250,16 @@ static int ld9040_enable(struct drm_panel *panel)
ret = ld9040_clear_error(ctx);
if (ret < 0)
- ld9040_disable(panel);
+ ld9040_unprepare(panel);
return ret;
}
+static int ld9040_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int ld9040_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -273,6 +286,8 @@ static int ld9040_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs ld9040_drm_funcs = {
.disable = ld9040_disable,
+ .unprepare = ld9040_unprepare,
+ .prepare = ld9040_prepare,
.enable = ld9040_enable,
.get_modes = ld9040_get_modes,
};
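
Converting panel_to_ld9040() (and panel_to_s6e8aa0() below) from a #define to a static inline gives the helper a real prototype, so passing anything other than a struct drm_panel * is a hard error rather than, at best, a typeof warning out of container_of(). A self-contained illustration; the struct bodies here are placeholders:

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace rendition of the kernel macro (no typeof check here). */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct drm_panel { int dummy; };    /* placeholder body */

    struct ld9040 {
        int error;
        struct drm_panel panel;
    };

    static inline struct ld9040 *panel_to_ld9040(struct drm_panel *panel)
    {
        return container_of(panel, struct ld9040, panel);
    }

    int main(void)
    {
        struct ld9040 ctx = { .error = -5 };
        struct drm_panel *p = &ctx.panel;

        /* Recovers the enclosing object from the embedded member. */
        printf("error=%d\n", panel_to_ld9040(p)->error);
        return 0;
    }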
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 06e57a26db7a..b5217fe37f02 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -120,7 +120,10 @@ struct s6e8aa0 {
int error;
};
-#define panel_to_s6e8aa0(p) container_of(p, struct s6e8aa0, panel)
+static inline struct s6e8aa0 *panel_to_s6e8aa0(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e8aa0, panel);
+}
static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
{
@@ -133,14 +136,14 @@ static int s6e8aa0_clear_error(struct s6e8aa0 *ctx)
static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- int ret;
+ ssize_t ret;
if (ctx->error < 0)
return;
- ret = mipi_dsi_dcs_write(dsi, dsi->channel, data, len);
+ ret = mipi_dsi_dcs_write(dsi, data, len);
if (ret < 0) {
- dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
+ dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
data);
ctx->error = ret;
}
@@ -154,7 +157,7 @@ static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len)
if (ctx->error < 0)
return ctx->error;
- ret = mipi_dsi_dcs_read(dsi, dsi->channel, cmd, data, len);
+ ret = mipi_dsi_dcs_read(dsi, cmd, data, len);
if (ret < 0) {
dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd);
ctx->error = ret;
@@ -889,6 +892,11 @@ static int s6e8aa0_power_off(struct s6e8aa0 *ctx)
static int s6e8aa0_disable(struct drm_panel *panel)
{
+ return 0;
+}
+
+static int s6e8aa0_unprepare(struct drm_panel *panel)
+{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
@@ -900,7 +908,7 @@ static int s6e8aa0_disable(struct drm_panel *panel)
return s6e8aa0_power_off(ctx);
}
-static int s6e8aa0_enable(struct drm_panel *panel)
+static int s6e8aa0_prepare(struct drm_panel *panel)
{
struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel);
int ret;
@@ -913,11 +921,16 @@ static int s6e8aa0_enable(struct drm_panel *panel)
ret = ctx->error;
if (ret < 0)
- s6e8aa0_disable(panel);
+ s6e8aa0_unprepare(panel);
return ret;
}
+static int s6e8aa0_enable(struct drm_panel *panel)
+{
+ return 0;
+}
+
static int s6e8aa0_get_modes(struct drm_panel *panel)
{
struct drm_connector *connector = panel->connector;
@@ -944,6 +957,8 @@ static int s6e8aa0_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs s6e8aa0_drm_funcs = {
.disable = s6e8aa0_disable,
+ .unprepare = s6e8aa0_unprepare,
+ .prepare = s6e8aa0_prepare,
.enable = s6e8aa0_enable,
.get_modes = s6e8aa0_get_modes,
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a25136132c31..4ce1db0a68ff 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -37,14 +37,35 @@ struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
+ unsigned int bpc;
+
struct {
unsigned int width;
unsigned int height;
} size;
+
+ /**
+ * @prepare: the time (in milliseconds) that it takes for the panel to
+ * become ready and start receiving video data
+ * @enable: the time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data
+ * @disable: the time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible)
+ * @unprepare: the time (in milliseconds) that it takes for the panel
+ * to power itself down completely
+ */
+ struct {
+ unsigned int prepare;
+ unsigned int enable;
+ unsigned int disable;
+ unsigned int unprepare;
+ } delay;
};
struct panel_simple {
struct drm_panel base;
+ bool prepared;
bool enabled;
const struct panel_desc *desc;
@@ -87,6 +108,7 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
connector->display_info.height_mm = panel->desc->size.height;
@@ -105,21 +127,40 @@ static int panel_simple_disable(struct drm_panel *panel)
backlight_update_status(p->backlight);
}
+ if (p->desc->delay.disable)
+ msleep(p->desc->delay.disable);
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_simple_unprepare(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (!p->prepared)
+ return 0;
+
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->enabled = false;
+
+ if (p->desc->delay.unprepare)
+ msleep(p->desc->delay.unprepare);
+
+ p->prepared = false;
return 0;
}
-static int panel_simple_enable(struct drm_panel *panel)
+static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
int err;
- if (p->enabled)
+ if (p->prepared)
return 0;
err = regulator_enable(p->supply);
@@ -131,6 +172,24 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->enable_gpio)
gpiod_set_value_cansleep(p->enable_gpio, 1);
+ if (p->desc->delay.prepare)
+ msleep(p->desc->delay.prepare);
+
+ p->prepared = true;
+
+ return 0;
+}
+
+static int panel_simple_enable(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+
+ if (p->enabled)
+ return 0;
+
+ if (p->desc->delay.enable)
+ msleep(p->desc->delay.enable);
+
if (p->backlight) {
p->backlight->props.power = FB_BLANK_UNBLANK;
backlight_update_status(p->backlight);
@@ -164,6 +223,8 @@ static int panel_simple_get_modes(struct drm_panel *panel)
static const struct drm_panel_funcs panel_simple_funcs = {
.disable = panel_simple_disable,
+ .unprepare = panel_simple_unprepare,
+ .prepare = panel_simple_prepare,
.enable = panel_simple_enable,
.get_modes = panel_simple_get_modes,
};
@@ -179,22 +240,21 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -ENOMEM;
panel->enabled = false;
+ panel->prepared = false;
panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
return PTR_ERR(panel->supply);
- panel->enable_gpio = devm_gpiod_get(dev, "enable");
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
if (IS_ERR(panel->enable_gpio)) {
err = PTR_ERR(panel->enable_gpio);
- if (err != -ENOENT) {
- dev_err(dev, "failed to request GPIO: %d\n", err);
- return err;
- }
+ dev_err(dev, "failed to request GPIO: %d\n", err);
+ return err;
+ }
- panel->enable_gpio = NULL;
- } else {
+ if (panel->enable_gpio) {
err = gpiod_direction_output(panel->enable_gpio, 0);
if (err < 0) {
dev_err(dev, "failed to setup GPIO: %d\n", err);
@@ -285,6 +345,7 @@ static const struct drm_display_mode auo_b101aw03_mode = {
static const struct panel_desc auo_b101aw03 = {
.modes = &auo_b101aw03_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -307,12 +368,40 @@ static const struct drm_display_mode auo_b133xtn01_mode = {
static const struct panel_desc auo_b133xtn01 = {
.modes = &auo_b133xtn01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 293,
.height = 165,
},
};
+static const struct drm_display_mode auo_b133htn01_mode = {
+ .clock = 150660,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 172,
+ .hsync_end = 1920 + 172 + 80,
+ .htotal = 1920 + 172 + 80 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 25,
+ .vsync_end = 1080 + 25 + 10,
+ .vtotal = 1080 + 25 + 10 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b133htn01 = {
+ .modes = &auo_b133htn01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .prepare = 105,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.clock = 72070,
.hdisplay = 1366,
@@ -329,6 +418,7 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
static const struct panel_desc chunghwa_claa101wa01a = {
.modes = &chunghwa_claa101wa01a_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 220,
.height = 120,
@@ -351,6 +441,7 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = {
static const struct panel_desc chunghwa_claa101wb01 = {
.modes = &chunghwa_claa101wb01_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 223,
.height = 125,
@@ -374,6 +465,7 @@ static const struct drm_display_mode edt_et057090dhu_mode = {
static const struct panel_desc edt_et057090dhu = {
.modes = &edt_et057090dhu_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 115,
.height = 86,
@@ -397,12 +489,82 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = {
static const struct panel_desc edt_etm0700g0dh6 = {
.modes = &edt_etm0700g0dh6_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 152,
.height = 91,
},
};
+static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
+ .clock = 32260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 168,
+ .hsync_end = 800 + 168 + 64,
+ .htotal = 800 + 168 + 64 + 88,
+ .vdisplay = 480,
+ .vsync_start = 480 + 37,
+ .vsync_end = 480 + 37 + 2,
+ .vtotal = 480 + 37 + 2 + 8,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc foxlink_fl500wvr00_a0t = {
+ .modes = &foxlink_fl500wvr00_a0t_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+ .clock = 71000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 6,
+ .htotal = 1366 + 64 + 6 + 64,
+ .vdisplay = 768,
+ .vsync_start = 768 + 8,
+ .vsync_end = 768 + 8 + 4,
+ .vtotal = 768 + 8 + 4 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_n116bge = {
+ .modes = &innolux_n116bge_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
+static const struct drm_display_mode innolux_n156bge_l21_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 16,
+ .hsync_end = 1366 + 16 + 34,
+ .htotal = 1366 + 16 + 34 + 50,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 6,
+ .vtotal = 768 + 2 + 6 + 12,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc innolux_n156bge_l21 = {
+ .modes = &innolux_n156bge_l21_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 344,
+ .height = 193,
+ },
+};
+
static const struct drm_display_mode lg_lp129qe_mode = {
.clock = 285250,
.hdisplay = 2560,
@@ -419,6 +581,7 @@ static const struct drm_display_mode lg_lp129qe_mode = {
static const struct panel_desc lg_lp129qe = {
.modes = &lg_lp129qe_mode,
.num_modes = 1,
+ .bpc = 8,
.size = {
.width = 272,
.height = 181,
@@ -441,6 +604,7 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = {
static const struct panel_desc samsung_ltn101nt05 = {
.modes = &samsung_ltn101nt05_mode,
.num_modes = 1,
+ .bpc = 6,
.size = {
.width = 1024,
.height = 600,
@@ -452,6 +616,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
+ .compatible = "auo,b133htn01",
+ .data = &auo_b133htn01,
+ }, {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
@@ -470,14 +637,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0700g0dh6",
.data = &edt_etm0700g0dh6,
}, {
+ .compatible = "foxlink,fl500wvr00-a0t",
+ .data = &foxlink_fl500wvr00_a0t,
+ }, {
+ .compatible = "innolux,n116bge",
+ .data = &innolux_n116bge,
+ }, {
+ .compatible = "innolux,n156bge-l21",
+ .data = &innolux_n156bge_l21,
+ }, {
.compatible = "lg,lp129qe",
.data = &lg_lp129qe,
}, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
- .compatible = "simple-panel",
- }, {
/* sentinel */
}
};
@@ -545,7 +719,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.height = 151,
},
},
- .flags = MIPI_DSI_MODE_VIDEO,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
@@ -599,7 +773,8 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.height = 136,
},
},
- .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
.lanes = 4,
};
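
The prepare/enable split that runs through all three panel drivers above mirrors the drm_panel state machine: prepare powers the panel to the point where it can accept video, enable covers the span until the first valid frame is visible, and disable/unprepare are their mirror images on the way down. panel-simple keys its new per-panel delays off exactly those four transitions. A compilable sketch of the sequencing logic, with the regulator/GPIO/backlight calls stubbed out:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    struct panel_simple {
        bool prepared, enabled;
        struct { unsigned prepare, enable; } delay;  /* in ms */
    };

    static void panel_msleep(unsigned ms) { usleep(ms * 1000); }

    static int panel_simple_prepare(struct panel_simple *p)
    {
        if (p->prepared)
            return 0;
        /* regulator_enable() + enable-GPIO would go here */
        if (p->delay.prepare)
            panel_msleep(p->delay.prepare);
        p->prepared = true;
        return 0;
    }

    static int panel_simple_enable(struct panel_simple *p)
    {
        if (p->enabled)
            return 0;
        if (p->delay.enable)
            panel_msleep(p->delay.enable);
        /* backlight on would go here */
        p->enabled = true;
        return 0;
    }

    int main(void)
    {
        /* auo,b133htn01 timings from the table above: 105 ms / 20 ms */
        struct panel_simple p = { .delay = { .prepare = 105, .enable = 20 } };

        panel_simple_prepare(&p);  /* ready to receive video */
        panel_simple_enable(&p);   /* first valid frame shown */
        printf("prepared=%d enabled=%d\n", p.prepared, p.enabled);
        return 0;
    }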
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5d7ea2461852..b8ced08b6291 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -835,7 +835,7 @@ static void qxl_conn_destroy(struct drm_connector *connector)
struct qxl_output *qxl_output =
drm_connector_to_qxl_output(connector);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(qxl_output);
}
@@ -902,7 +902,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
qdev->hotplug_mode_update_property, 0);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index f437b30ce689..df567888bb1e 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -660,7 +660,7 @@ static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
return 0;
}
-static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
.fb_probe = qxl_fb_find_or_create_single,
};
@@ -676,9 +676,12 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
- qfbdev->helper.funcs = &qxl_fb_helper_funcs;
spin_lock_init(&qfbdev->delayed_ops_lock);
INIT_LIST_HEAD(&qfbdev->delayed_ops);
+
+ drm_fb_helper_prepare(qdev->ddev, &qfbdev->helper,
+ &qxl_fb_helper_funcs);
+
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index d458a140c024..83a423293afd 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -31,7 +31,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
@@ -67,7 +67,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
{
int r;
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+ r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index dbcbfe80aac0..0013ad0db9ef 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
- ci_dpm.o dce6_afmt.o radeon_vm.o
+ ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o
# add async DMA block
radeon-y += \
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 7d68203a3737..a7f2ddf09a9d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -331,12 +331,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
- /* get the native mode for LVDS */
- if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+ /* get the native mode for scaling */
+ if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_panel_mode_fixup(encoder, adjusted_mode);
-
- /* get the native mode for TV */
- if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+ } else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
@@ -346,6 +344,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
+ } else if (radeon_encoder->rmx_type != RMX_OFF) {
+ radeon_panel_mode_fixup(encoder, adjusted_mode);
}
if (ASIC_IS_DCE3(rdev) &&
@@ -716,7 +716,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_connector->use_digital &&
(radeon_connector->audio == RADEON_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
@@ -735,7 +735,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
@@ -755,7 +755,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
} else if (radeon_audio != 0) {
if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
(radeon_connector->audio == RADEON_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 584090ac3eb9..022561e28707 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -940,7 +940,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
pi->vddc_leakage.count = 0;
pi->vddci_leakage.count = 0;
- if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
+ continue;
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ }
+ } else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
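
The new ci_dpm branch above prefers EVV: when the platform caps advertise ATOM_PP_PLATFORM_CAP_EVV, each virtual voltage ID is translated to a real VDDC by asking the VBIOS (radeon_atom_get_voltage_evv()) instead of deriving it from leakage parameters, and only non-zero, non-identity translations are recorded. The table-building pattern, compilable with a stubbed VBIOS query (the stub's voltage values are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_LEAKAGE 8
    #define VIRTUAL_ID0 0xff01  /* stand-in for ATOM_VIRTUAL_VOLTAGE_ID0 */

    struct leakage_table {
        uint16_t actual[MAX_LEAKAGE];
        uint16_t id[MAX_LEAKAGE];
        int count;
    };

    /* Stand-in for radeon_atom_get_voltage_evv(): pretend the VBIOS
     * knows real voltages for the first two virtual IDs only. */
    static int get_voltage_evv(uint16_t virt_id, uint16_t *vddc)
    {
        if (virt_id > VIRTUAL_ID0 + 1)
            return -1;
        *vddc = 900 + 25 * (virt_id - VIRTUAL_ID0);
        return 0;
    }

    int main(void)
    {
        struct leakage_table t = { .count = 0 };

        for (int i = 0; i < MAX_LEAKAGE; i++) {
            uint16_t virt = VIRTUAL_ID0 + i, vddc;

            if (get_voltage_evv(virt, &vddc) != 0)
                continue;
            /* Same filter as the patch: skip zero and identity. */
            if (vddc != 0 && vddc != virt) {
                t.actual[t.count] = vddc;
                t.id[t.count] = virt;
                t.count++;
            }
        }
        printf("%d leakage entries\n", t.count);
        return 0;
    }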
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index 8debc9d47362..b630edc2fd0c 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -213,24 +213,37 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- ucode_start_address = BONAIRE_SMC_UCODE_START;
- ucode_size = BONAIRE_SMC_UCODE_SIZE;
- break;
- case CHIP_HAWAII:
- ucode_start_address = HAWAII_SMC_UCODE_START;
- ucode_size = HAWAII_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ ucode_start_address = BONAIRE_SMC_UCODE_START;
+ ucode_size = BONAIRE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAWAII:
+ ucode_start_address = HAWAII_SMC_UCODE_START;
+ ucode_size = HAWAII_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
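
ci_load_smc_ucode() now handles both firmware formats: the legacy blobs are raw big-endian words whose per-ASIC start address and size are baked into the driver, while the new unified images carry a little-endian header that states where the ucode lives and how big it is. The same header-driven loading pattern repeats through cik.c below for the MC, PFP, CE, ME, MEC and RLC blocks. A self-contained sketch of the decode step; the 3-word header layout here is a simplified stand-in for the real smc_firmware_header_v1_0:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a little-endian u32 from a raw firmware image. */
    static uint32_t get_le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        /* Fake blob: ucode_size_bytes at offset 4,
         * ucode_array_offset_bytes at offset 8. */
        static const uint8_t fw[] = {
            0x14, 0, 0, 0,              /* size_bytes               */
            0x08, 0, 0, 0,              /* ucode_size_bytes   = 8   */
            0x0c, 0, 0, 0,              /* ucode_array_offset = 12  */
            0xde, 0xad, 0xbe, 0xef,     /* ucode words...           */
            0x01, 0x02, 0x03, 0x04,
        };
        uint32_t size = get_le32(fw + 4);
        uint32_t off  = get_le32(fw + 8);

        /* The driver's loop writes these words to the ucode port
         * with le32_to_cpup() instead of the old be32_to_cpup(). */
        for (uint32_t i = 0; i < size / 4; i++)
            printf("word %u: %#010x\n", i, get_le32(fw + off + i * 4));
        return 0;
    }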
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index c0ea66192fe0..b625646bf3e2 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -42,6 +42,16 @@ MODULE_FIRMWARE("radeon/BONAIRE_mc2.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
+
+MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
+MODULE_FIRMWARE("radeon/bonaire_me.bin");
+MODULE_FIRMWARE("radeon/bonaire_ce.bin");
+MODULE_FIRMWARE("radeon/bonaire_mec.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
+MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
+MODULE_FIRMWARE("radeon/bonaire_smc.bin");
+
MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
MODULE_FIRMWARE("radeon/HAWAII_me.bin");
MODULE_FIRMWARE("radeon/HAWAII_ce.bin");
@@ -51,18 +61,45 @@ MODULE_FIRMWARE("radeon/HAWAII_mc2.bin");
MODULE_FIRMWARE("radeon/HAWAII_rlc.bin");
MODULE_FIRMWARE("radeon/HAWAII_sdma.bin");
MODULE_FIRMWARE("radeon/HAWAII_smc.bin");
+
+MODULE_FIRMWARE("radeon/hawaii_pfp.bin");
+MODULE_FIRMWARE("radeon/hawaii_me.bin");
+MODULE_FIRMWARE("radeon/hawaii_ce.bin");
+MODULE_FIRMWARE("radeon/hawaii_mec.bin");
+MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
+MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
+MODULE_FIRMWARE("radeon/hawaii_smc.bin");
+
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
MODULE_FIRMWARE("radeon/KAVERI_mec.bin");
MODULE_FIRMWARE("radeon/KAVERI_rlc.bin");
MODULE_FIRMWARE("radeon/KAVERI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kaveri_pfp.bin");
+MODULE_FIRMWARE("radeon/kaveri_me.bin");
+MODULE_FIRMWARE("radeon/kaveri_ce.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec.bin");
+MODULE_FIRMWARE("radeon/kaveri_mec2.bin");
+MODULE_FIRMWARE("radeon/kaveri_rlc.bin");
+MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
+
MODULE_FIRMWARE("radeon/KABINI_pfp.bin");
MODULE_FIRMWARE("radeon/KABINI_me.bin");
MODULE_FIRMWARE("radeon/KABINI_ce.bin");
MODULE_FIRMWARE("radeon/KABINI_mec.bin");
MODULE_FIRMWARE("radeon/KABINI_rlc.bin");
MODULE_FIRMWARE("radeon/KABINI_sdma.bin");
+
+MODULE_FIRMWARE("radeon/kabini_pfp.bin");
+MODULE_FIRMWARE("radeon/kabini_me.bin");
+MODULE_FIRMWARE("radeon/kabini_ce.bin");
+MODULE_FIRMWARE("radeon/kabini_mec.bin");
+MODULE_FIRMWARE("radeon/kabini_rlc.bin");
+MODULE_FIRMWARE("radeon/kabini_sdma.bin");
+
MODULE_FIRMWARE("radeon/MULLINS_pfp.bin");
MODULE_FIRMWARE("radeon/MULLINS_me.bin");
MODULE_FIRMWARE("radeon/MULLINS_ce.bin");
@@ -70,6 +107,13 @@ MODULE_FIRMWARE("radeon/MULLINS_mec.bin");
MODULE_FIRMWARE("radeon/MULLINS_rlc.bin");
MODULE_FIRMWARE("radeon/MULLINS_sdma.bin");
+MODULE_FIRMWARE("radeon/mullins_pfp.bin");
+MODULE_FIRMWARE("radeon/mullins_me.bin");
+MODULE_FIRMWARE("radeon/mullins_ce.bin");
+MODULE_FIRMWARE("radeon/mullins_mec.bin");
+MODULE_FIRMWARE("radeon/mullins_rlc.bin");
+MODULE_FIRMWARE("radeon/mullins_sdma.bin");
+
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
@@ -1760,27 +1804,44 @@ static void cik_srbm_select(struct radeon_device *rdev,
*/
int ci_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- io_mc_regs = (u32 *)&bonaire_io_mc_regs;
- regs_size = BONAIRE_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAWAII:
- io_mc_regs = (u32 *)&hawaii_io_mc_regs;
- regs_size = HAWAII_IO_MC_REGS_SIZE;
- break;
- default:
- return -EINVAL;
+ radeon_ucode_print_mc_hdr(&hdr->header);
+
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ io_mc_regs = (u32 *)&bonaire_io_mc_regs;
+ regs_size = BONAIRE_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAWAII:
+ io_mc_regs = (u32 *)&hawaii_io_mc_regs;
+ regs_size = HAWAII_IO_MC_REGS_SIZE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1797,13 +1858,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1841,17 +1910,21 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
static int cik_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size = 0,
sdma_req_size, smc_req_size = 0, mc2_req_size = 0;
char fw_name[30];
+ int new_fw = 0;
int err;
+ int num_fw;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_BONAIRE:
chip_name = "BONAIRE";
+ new_chip_name = "bonaire";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1861,9 +1934,11 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc2_req_size = BONAIRE_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_HAWAII:
chip_name = "HAWAII";
+ new_chip_name = "hawaii";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
@@ -1873,142 +1948,285 @@ static int cik_init_microcode(struct radeon_device *rdev)
mc2_req_size = HAWAII_MC2_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
smc_req_size = ALIGN(HAWAII_SMC_UCODE_SIZE, 4);
+ num_fw = 8;
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
+ new_chip_name = "kaveri";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KV_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 7;
break;
case CHIP_KABINI:
chip_name = "KABINI";
+ new_chip_name = "kabini";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = KB_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
case CHIP_MULLINS:
chip_name = "MULLINS";
+ new_chip_name = "mullins";
pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
me_req_size = CIK_ME_UCODE_SIZE * 4;
ce_req_size = CIK_CE_UCODE_SIZE * 4;
mec_req_size = CIK_MEC_UCODE_SIZE * 4;
rlc_req_size = ML_RLC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ num_fw = 6;
break;
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", new_chip_name);
err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->mec_fw->size != mec_req_size) {
- printk(KERN_ERR
- "cik_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->mec_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name);
+ err = request_firmware(&rdev->mec_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->mec_fw->size != mec_req_size) {
+ printk(KERN_ERR
+ "cik_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mec_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->mec_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
+ }
+
+ if (rdev->family == CHIP_KAVERI) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", new_chip_name);
+ err = request_firmware(&rdev->mec2_fw, fw_name, rdev->dev);
+ if (err) {
+ goto out;
+ } else {
+ err = radeon_ucode_validate(rdev->mec2_fw);
+ if (err) {
+ goto out;
+ } else {
+ new_fw++;
+ }
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "cik_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", new_chip_name);
err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->sdma_fw->size != sdma_req_size) {
- printk(KERN_ERR
- "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
- rdev->sdma_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ err = request_firmware(&rdev->sdma_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->sdma_fw->size != sdma_req_size) {
+ printk(KERN_ERR
+ "cik_sdma: Bogus length %zu in firmware \"%s\"\n",
+ rdev->sdma_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->sdma_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
/* No SMC, MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)) {
+ printk(KERN_ERR
+ "cik_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)){
- printk(KERN_ERR
- "cik_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "cik_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "cik_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "cik_fw: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
}
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < num_fw) {
+ printk(KERN_ERR "ci_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
+
out:
if (err) {
if (err != -EINVAL)
@@ -2021,8 +2239,14 @@ out:
rdev->me_fw = NULL;
release_firmware(rdev->ce_fw);
rdev->ce_fw = NULL;
+ release_firmware(rdev->mec_fw);
+ rdev->mec_fw = NULL;
+ release_firmware(rdev->mec2_fw);
+ rdev->mec2_fw = NULL;
release_firmware(rdev->rlc_fw);
rdev->rlc_fw = NULL;
+ release_firmware(rdev->sdma_fw);
+ rdev->sdma_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
release_firmware(rdev->smc_fw);
@@ -3666,8 +3890,6 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
/**
@@ -3696,8 +3918,6 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, upper_32_bits(addr));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
- /* HDP flush */
- cik_hdp_flush_cp_ring_emit(rdev, fence->ring);
}
bool cik_semaphore_ring_emit(struct radeon_device *rdev,
@@ -3969,7 +4189,6 @@ static void cik_cp_gfx_enable(struct radeon_device *rdev, bool enable)
*/
static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
@@ -3977,26 +4196,70 @@ static int cik_cp_gfx_load_microcode(struct radeon_device *rdev)
cik_cp_gfx_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CIK_ME_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@@ -4261,7 +4524,6 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
*/
static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->mec_fw)
@@ -4269,20 +4531,55 @@ static int cik_cp_compute_load_microcode(struct radeon_device *rdev)
cik_cp_compute_enable(rdev, false);
- /* MEC1 */
- fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *mec_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&mec_hdr->header);
+
+ /* MEC1 */
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
- if (rdev->family == CHIP_KAVERI) {
/* MEC2 */
+ if (rdev->family == CHIP_KAVERI) {
+ const struct gfx_firmware_header_v1_0 *mec2_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data +
+ le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
+ } else {
+ const __be32 *fw_data;
+
+ /* MEC1 */
fw_data = (const __be32 *)rdev->mec_fw->data;
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
- WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ WREG32(CP_MEC_ME1_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME1_UCODE_ADDR, 0);
+
+ if (rdev->family == CHIP_KAVERI) {
+ /* MEC2 */
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ for (i = 0; i < CIK_MEC_UCODE_SIZE; i++)
+ WREG32(CP_MEC_ME2_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_MEC_ME2_UCODE_ADDR, 0);
+ }
}
return 0;
@@ -4375,7 +4672,7 @@ static int cik_mec_init(struct radeon_device *rdev)
r = radeon_bo_create(rdev,
rdev->mec.num_mec *rdev->mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->mec.hpd_eop_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -4545,7 +4842,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
r = radeon_bo_create(rdev,
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL,
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->ring[idx].mqd_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create MQD bo failed\n", r);
@@ -5402,7 +5699,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@@ -5642,12 +5938,13 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
+ int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
if (vm == NULL)
return;
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
radeon_ring_write(ring,
@@ -5697,7 +5994,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 1 << vm->id);
/* compute doesn't have PFP */
- if (ridx == RADEON_RING_TYPE_GFX_INDEX) {
+ if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
@@ -5865,28 +6162,10 @@ static void cik_rlc_start(struct radeon_device *rdev)
static int cik_rlc_resume(struct radeon_device *rdev)
{
u32 i, size, tmp;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- default:
- size = BONAIRE_RLC_UCODE_SIZE;
- break;
- case CHIP_KAVERI:
- size = KV_RLC_UCODE_SIZE;
- break;
- case CHIP_KABINI:
- size = KB_RLC_UCODE_SIZE;
- break;
- case CHIP_MULLINS:
- size = ML_RLC_UCODE_SIZE;
- break;
- }
-
cik_rlc_stop(rdev);
/* disable CG */
@@ -5910,11 +6189,45 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
WREG32(RLC_GPM_UCODE_ADDR, 0);
- for (i = 0; i < size; i++)
- WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ default:
+ size = BONAIRE_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KAVERI:
+ size = KV_RLC_UCODE_SIZE;
+ break;
+ case CHIP_KABINI:
+ size = KB_RLC_UCODE_SIZE;
+ break;
+ case CHIP_MULLINS:
+ size = ML_RLC_UCODE_SIZE;
+ break;
+ }
+
+ fw_data = (const __be32 *)rdev->rlc_fw->data;
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ for (i = 0; i < size; i++)
+ WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(RLC_GPM_UCODE_ADDR, 0);
+ }
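The new_fw path above no longer hard-codes per-family word counts; it trusts the sizes carried in the firmware image itself. A minimal stand-alone sketch of that header walk, assuming the common_firmware_header layout from radeon_ucode.h (a raw cast stands in for the driver's le32_to_cpu accessors, so this only models the little-endian-host case):

#include <stdint.h>
#include <stddef.h>

struct common_firmware_header {
        uint32_t size_bytes;
        uint32_t header_size_bytes;
        uint16_t header_version_major;
        uint16_t header_version_minor;
        uint16_t ip_version_major;
        uint16_t ip_version_minor;
        uint32_t ucode_version;
        uint32_t ucode_size_bytes;          /* payload length in bytes */
        uint32_t ucode_array_offset_bytes;  /* payload offset from blob start */
        uint32_t crc32;
};

/* Return the ucode payload and its length in 32-bit words. */
static const uint32_t *ucode_words(const uint8_t *blob, size_t *nwords)
{
        const struct common_firmware_header *hdr =
                (const struct common_firmware_header *)blob;

        *nwords = hdr->ucode_size_bytes / 4;    /* one WREG32 per dword */
        return (const uint32_t *)(blob + hdr->ucode_array_offset_bytes);
}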
/* XXX - find out what chips support lbpw */
cik_enable_lbpw(rdev, false);
@@ -6348,11 +6661,10 @@ static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
void cik_init_cp_pg_table(struct radeon_device *rdev)
{
- const __be32 *fw_data;
volatile u32 *dst_ptr;
int me, i, max_me = 4;
u32 bo_offset = 0;
- u32 table_offset;
+ u32 table_offset, table_size;
if (rdev->family == CHIP_KAVERI)
max_me = 5;
@@ -6363,24 +6675,71 @@ void cik_init_cp_pg_table(struct radeon_device *rdev)
/* write the cp table buffer */
dst_ptr = rdev->rlc.cp_table_ptr;
for (me = 0; me < max_me; me++) {
- if (me == 0) {
- fw_data = (const __be32 *)rdev->ce_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 1) {
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
- } else if (me == 2) {
- fw_data = (const __be32 *)rdev->me_fw->data;
- table_offset = CP_ME_TABLE_OFFSET;
+ if (rdev->new_fw) {
+ const __le32 *fw_data;
+ const struct gfx_firmware_header_v1_0 *hdr;
+
+ if (me == 0) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else {
+ hdr = (const struct gfx_firmware_header_v1_0 *)rdev->mec2_fw->data;
+ fw_data = (const __le32 *)
+ (rdev->mec2_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
} else {
- fw_data = (const __be32 *)rdev->mec_fw->data;
- table_offset = CP_MEC_TABLE_OFFSET;
- }
+ const __be32 *fw_data;
+ table_size = CP_ME_TABLE_SIZE;
+
+ if (me == 0) {
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 1) {
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 2) {
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else {
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ table_offset = CP_MEC_TABLE_OFFSET;
+ }
- for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
- dst_ptr[bo_offset + i] = cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(be32_to_cpu(fw_data[table_offset + i]));
+ }
+ bo_offset += table_size;
}
- bo_offset += CP_ME_TABLE_SIZE;
}
}
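In the page-table setup above, each microengine contributes a jump table of jt_size dwords starting jt_offset dwords into its blob, and the tables are packed back to back in the CP table BO. A stand-alone model of that packing (names are illustrative, not from the patch):

#include <stdint.h>

/* Pack n per-ME jump tables contiguously into dst; off[]/len[] are in
 * dwords, mirroring jt_offset/jt_size above. Returns total dwords used. */
static uint32_t pack_jump_tables(uint32_t *dst, const uint32_t *const fw[],
                                 const uint32_t off[], const uint32_t len[],
                                 int n)
{
        uint32_t bo_offset = 0;
        int me;

        for (me = 0; me < n; me++) {
                uint32_t i;

                for (i = 0; i < len[me]; i++)
                        dst[bo_offset + i] = fw[me][off[me] + i];
                bo_offset += len[me];
        }
        return bo_offset;
}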
@@ -7618,7 +7977,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
@@ -7900,6 +8260,7 @@ restart_ih:
static int cik_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
+ u32 nop;
int r;
/* enable pcie gen2/3 link */
@@ -8033,9 +8394,18 @@ static int cik_startup(struct radeon_device *rdev)
}
cik_irq_set(rdev);
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->new_fw)
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ else
+ nop = RADEON_CP_PACKET2;
+ } else {
+ nop = PACKET3(PACKET3_NOP, 0x3FFF);
+ }
+
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
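The nop selection above matters because ring padding is written with this value. A small stand-alone demo of the two encodings, with the packet macros as defined in cikd.h (PACKET3_NOP is opcode 0x10; old Hawaii microcode only pads correctly with type-2 packets):

#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE2    2
#define PACKET_TYPE3    3
#define PACKET3_NOP     0x10

#define CP_PACKET2      ((uint32_t)PACKET_TYPE2 << 30)
#define PACKET3(op, n)  (((uint32_t)PACKET_TYPE3 << 30) | \
                         (((op) & 0xFF) << 8) | (((n) & 0x3FFF) << 16))

int main(void)
{
        printf("type-2 nop: 0x%08x\n", CP_PACKET2);     /* 0x80000000 */
        printf("type-3 nop: 0x%08x\n", PACKET3(PACKET3_NOP, 0x3FFF));
        return 0;
}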
@@ -8043,7 +8413,7 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
ring->me = 1; /* first MEC */
@@ -8054,7 +8424,7 @@ static int cik_startup(struct radeon_device *rdev)
/* type-2 packets are deprecated on MEC, use type-3 instead */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
- PACKET3(PACKET3_NOP, 0x3FFF));
+ nop);
if (r)
return r;
/* dGPU only have 1 MEC */
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 8e9d0f1d858e..bcf480510ac2 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -24,6 +24,7 @@
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_ucode.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "cikd.h"
@@ -118,6 +119,7 @@ void cik_sdma_set_wptr(struct radeon_device *rdev,
reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;
WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+ (void)RREG32(reg);
}
/**
@@ -419,7 +421,6 @@ static int cik_sdma_rlc_resume(struct radeon_device *rdev)
*/
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
if (!rdev->sdma_fw)
@@ -428,19 +429,48 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
/* halt the MEs */
cik_sdma_enable(rdev, false);
- /* sdma0 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- /* sdma1 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ if (rdev->new_fw) {
+ const struct sdma_firmware_header_v1_0 *hdr =
+ (const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_sdma_hdr(&hdr->header);
+
+ /* sdma0 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __le32 *)
+ (rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ } else {
+ const __be32 *fw_data;
+
+ /* sdma0 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+ }
WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
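The two loader branches above cannot share a loop because the word order differs: legacy blobs are big-endian streams, header-style blobs are little-endian. A stand-alone model of the conversion each loop performs before the register write:

#include <stdint.h>

/* Read a big-endian dword from the blob (what be32_to_cpup() yields). */
static uint32_t be32(const uint8_t *p)
{
        return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
               (uint32_t)p[2] << 8  | (uint32_t)p[3];
}

/* Read a little-endian dword (what le32_to_cpup() yields). */
static uint32_t le32(const uint8_t *p)
{
        return (uint32_t)p[3] << 24 | (uint32_t)p[2] << 16 |
               (uint32_t)p[1] << 8  | (uint32_t)p[0];
}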
@@ -719,7 +749,43 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0x1FFFF8)
+ bytes = 0x1FFFF8;
+
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = bytes;
+ ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(src);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
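The 0x1FFFF8 cap above is the largest byte count the linear-copy packet can carry that is still a whole number of 8-byte PTEs. A quick stand-alone check of how many packets a mapping needs under that cap:

#include <stdio.h>

static unsigned copy_packets(unsigned count)    /* count = PTEs to copy */
{
        unsigned packets = 0;

        while (count) {
                unsigned bytes = count * 8;

                if (bytes > 0x1FFFF8)
                        bytes = 0x1FFFF8;
                count -= bytes / 8;     /* 262143 PTEs per full packet */
                packets++;
        }
        return packets;
}

int main(void)
{
        /* e.g. 1M entries (4 GiB worth of 4 KiB pages) */
        printf("%u packets\n", copy_packets(1u << 20));
        return 0;
}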
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -729,84 +795,103 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by writing them manually using sDMA (CIK).
*/
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0x1FFFF8)
- bytes = 0x1FFFF8;
-
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = bytes;
- ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(src);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+ SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
}
}
+}
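Folding the SYSTEM and non-SYSTEM cases into one loop works because only the per-entry value differs. A hedged sketch of that selection (R600_PTE_* values as in radeon.h; map_gart() is a hypothetical stand-in for radeon_vm_map_gart):

#include <stdint.h>

#define R600_PTE_VALID  (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)

extern uint64_t map_gart(uint64_t addr);        /* hypothetical stub */

static uint64_t pte_value(uint64_t addr, uint32_t flags)
{
        uint64_t value;

        if (flags & R600_PTE_SYSTEM)            /* system page: translate */
                value = map_gart(addr) & 0xFFFFFFFFFFFFF000ULL;
        else if (flags & R600_PTE_VALID)        /* vram page: direct */
                value = addr;
        else                                    /* invalid entry */
                value = 0;
        return value | flags;
}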
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+}
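The payoff of keeping a separate GENERATE_PTE_PDE path: one 10-dword packet covers up to 0x7FFFF contiguous entries, where the write path needs a 4-dword header plus 2 dwords per entry. A quick stand-alone comparison:

#include <stdio.h>

int main(void)
{
        unsigned count = 0x7FFFF;       /* max entries per packet */

        printf("set_pages:   %u dwords\n", 10u);
        printf("write_pages: %u dwords\n", 4u + 2u * count);
        return 0;
}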
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
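pad_ib keeps the IB a multiple of 8 dwords, which is the granularity the SDMA fetcher works in. The same rounding, expressed arithmetically in a stand-alone snippet:

#include <stdio.h>

int main(void)
{
        unsigned length_dw = 13;
        unsigned padded = (length_dw + 7) & ~7u;        /* next multiple of 8 */

        printf("%u -> %u (adds %u NOPs)\n", length_dw, padded,
               padded - length_dw);
        return 0;
}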
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 0a65dc7e93e7..ab29f953a767 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -136,13 +136,13 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
AUDIO_LIPSYNC(connector->audio_latency[1]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
} else {
if (connector->latency_present[0])
tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
AUDIO_LIPSYNC(connector->audio_latency[0]);
else
- tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+ tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
}
WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
@@ -164,8 +164,10 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -173,7 +175,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
@@ -225,8 +227,10 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder)
+ if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
+ break;
+ }
}
if (!radeon_connector) {
@@ -234,7 +238,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 15e4f28015e1..4fedd14e670a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2424,7 +2424,6 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -2677,7 +2676,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+ tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
@@ -4023,7 +4022,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -4101,7 +4101,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4175,8 +4176,10 @@ int sumo_rlc_init(struct radeon_device *rdev)
if (rdev->rlc.cp_table_size) {
if (rdev->rlc.cp_table_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
+ PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+ &rdev->rlc.cp_table_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
sumo_rlc_fini(rdev);
@@ -4961,7 +4964,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 1ec0e6e83f9f..278c7a139d74 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -117,7 +117,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
@@ -172,7 +172,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5a33ca681867..327b85f7fd0d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1229,7 +1229,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 6378e0276691..8a3e6221cece 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += ndw * 4;
+ src += ndw * 4;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -315,71 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: hw access flags
+ * @flags: hw access flags
*
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
*/
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & R600_PTE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & R600_PTE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+ 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 1544efcf1c3a..04b5940b8923 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -652,7 +652,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
@@ -683,7 +682,7 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
}
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
u32 *gtt = rdev->gart.ptr;
gtt[i] = cpu_to_le32(lower_32_bits(addr));
@@ -838,11 +837,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
- RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+ r100_ring_hdp_flush(rdev, ring);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
@@ -1061,6 +1056,20 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
(void)RREG32(RADEON_CP_RB_WPTR);
}
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * @rdev: radeon device structure
+ * @ring: ring buffer struct for emitting packets
+ */
+void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
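With HDP flushing now exposed as a per-ring callback, fence emission no longer needs r100-specific register knowledge. A hedged sketch of how a caller might dispatch it (the wrapper name is illustrative, not from this patch):

static inline void ring_hdp_flush(struct radeon_device *rdev,
                                  struct radeon_ring *ring)
{
        /* Not every ring implements it; older fence paths inlined the writes. */
        if (rdev->asic->ring[ring->idx]->hdp_flush)
                rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
}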
+
static void r100_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
@@ -1401,7 +1410,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
*/
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, waitreloc;
@@ -1441,12 +1449,11 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = R100_CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
@@ -4067,39 +4074,6 @@ int r100_init(struct radeon_device *rdev)
return 0;
}
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- return readl(((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
- uint32_t ret;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
- return ret;
- }
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect)
-{
- if (reg < rdev->rmmio_size && !always_indirect)
- writel(v, ((void __iomem *)rdev->rmmio) + reg);
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
- writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
- writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
- spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
- }
-}
-
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3c21d77a483d..75b30338c226 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,17 +69,23 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
mb();
}
+#define R300_PTE_UNSNOOPED (1 << 0)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr)
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = rdev->gart.ptr;
addr = (lower_32_bits(addr) >> 8) |
- ((upper_32_bits(addr) & 0xff) << 24) |
- R300_PTE_WRITEABLE | R300_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 24);
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R300_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R300_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ addr |= R300_PTE_UNSNOOPED;
/* on x86 we want this to be CPU endian; on powerpc
* without HW swappers it'll get swapped on the way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
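A stand-alone model of the flag translation above, showing how a GART page address plus RADEON_GART_PAGE_* flags become one 32-bit PTE (flag values copied from the patch):

#include <stdint.h>
#include <stdio.h>

#define R300_PTE_UNSNOOPED      (1 << 0)
#define R300_PTE_WRITEABLE      (1 << 2)
#define R300_PTE_READABLE       (1 << 3)

#define RADEON_GART_PAGE_READ   (1 << 1)
#define RADEON_GART_PAGE_WRITE  (1 << 2)
#define RADEON_GART_PAGE_SNOOP  (1 << 3)

static uint32_t rv370_pte(uint64_t addr, uint32_t flags)
{
        uint32_t pte = ((uint32_t)addr >> 8) |
                       (((uint32_t)(addr >> 32) & 0xff) << 24);

        if (flags & RADEON_GART_PAGE_READ)
                pte |= R300_PTE_READABLE;
        if (flags & RADEON_GART_PAGE_WRITE)
                pte |= R300_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                pte |= R300_PTE_UNSNOOPED;      /* snooping is now opt-in */
        return pte;
}

int main(void)
{
        printf("0x%08x\n", rv370_pte(0x12345678000ULL,
                                     RADEON_GART_PAGE_READ |
                                     RADEON_GART_PAGE_WRITE |
                                     RADEON_GART_PAGE_SNOOP));
        return 0;
}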
@@ -120,7 +126,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 3c69f58e46ef..c70a504d96af 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -968,7 +968,6 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -1339,7 +1338,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->vram_scratch.robj);
+ 0, NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
@@ -3227,7 +3226,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
if (rdev->ih.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_DOMAIN_GTT, 0,
NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
@@ -3924,11 +3923,13 @@ restart_ih:
break;
case 9: /* D1 pflip */
DRM_DEBUG("IH: D1 flip\n");
- radeon_crtc_handle_flip(rdev, 0);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 0);
break;
case 11: /* D2 pflip */
DRM_DEBUG("IH: D2 flip\n");
- radeon_crtc_handle_flip(rdev, 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, 1);
break;
case 19: /* HPD/DAC hotplug */
switch (src_data) {
@@ -4089,16 +4090,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
}
/**
- * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
* @rdev: radeon device structure
- * bo: buffer object struct which userspace is waiting for idle
*
- * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
- * through ring buffer, this leads to corruption in rendering, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
- * directly perform HDP flush by writing register through MMIO.
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer. This leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
+ * directly perform the HDP flush by writing the register through MMIO.
*/
-void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
* rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
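A hedged sketch of how the renamed hook is meant to be called: the bo argument is gone, so any wait-idle style caller can flush unconditionally (the NULL guard mirrors the ASICs that leave the hook unset):

        if (rdev->asic->mmio_hdp_flush)
                rdev->asic->mmio_hdp_flush(rdev);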
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 12511bb5fd6f..c47537a1ddba 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -825,7 +825,6 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
uint32_t *vline_status)
{
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_cs_packet p3reloc, wait_reg_mem;
@@ -887,12 +886,11 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = R600_CP_PACKET0_GET_REG(header);
- obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+ if (!crtc) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
- crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
crtc_id = radeon_crtc->crtc_id;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 60c47f829122..9e1732eb402c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -64,6 +64,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/interval_tree.h>
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
@@ -103,6 +104,7 @@ extern int radeon_hard_reset;
extern int radeon_vm_size;
extern int radeon_vm_block_size;
extern int radeon_deep_color;
+extern int radeon_use_pflipirq;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -304,6 +306,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
u16 *vddc, u16 *vddci,
u16 virtual_voltage_id,
u16 vbios_voltage_id);
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
@@ -317,6 +322,9 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
struct atom_voltage_table *voltage_table);
bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
u8 voltage_type, u8 voltage_mode);
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
void radeon_atom_update_memory_dll(struct radeon_device *rdev,
u32 mem_clock);
void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -441,14 +449,12 @@ struct radeon_mman {
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
- uint64_t soffset;
- uint64_t eoffset;
uint32_t flags;
- bool valid;
+ uint64_t addr;
unsigned ref_count;
/* protected by vm mutex */
- struct list_head vm_list;
+ struct interval_tree_node it;
struct list_head vm_status;
/* constant after initialization */
@@ -465,6 +471,7 @@ struct radeon_bo {
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+ u32 flags;
unsigned pin_count;
void *kptr;
u32 tiling_flags;
@@ -543,9 +550,9 @@ struct radeon_gem {
int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj);
int radeon_mode_dumb_create(struct drm_file *file_priv,
@@ -590,6 +597,12 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
+#define RADEON_GART_PAGE_DUMMY 0
+#define RADEON_GART_PAGE_VALID (1 << 0)
+#define RADEON_GART_PAGE_READ (1 << 1)
+#define RADEON_GART_PAGE_WRITE (1 << 2)
+#define RADEON_GART_PAGE_SNOOP (1 << 3)
+
struct radeon_gart {
dma_addr_t table_addr;
struct radeon_bo *robj;
@@ -614,8 +627,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist,
- dma_addr_t *dma_addr);
-void radeon_gart_restore(struct radeon_device *rdev);
+ dma_addr_t *dma_addr, uint32_t flags);
/*
@@ -855,9 +867,9 @@ struct radeon_mec {
#define R600_PTE_FRAG_64KB (4 << 7)
#define R600_PTE_FRAG_256KB (6 << 7)
-/* flags used for GART page table entries on R600+ */
-#define R600_PTE_GART ( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
- | R600_PTE_READABLE | R600_PTE_WRITEABLE)
+/* flags needed to be set so we can copy directly from the GART table */
+#define R600_PTE_GART_MASK ( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
+ R600_PTE_SYSTEM | R600_PTE_VALID )
struct radeon_vm_pt {
struct radeon_bo *bo;
@@ -865,9 +877,12 @@ struct radeon_vm_pt {
};
struct radeon_vm {
- struct list_head va;
+ struct rb_root va;
unsigned id;
+ /* BOs moved, but not yet updated in the PT */
+ struct list_head invalidated;
+
/* BOs freed, but not yet updated in the PT */
struct list_head freed;
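Switching va from a list to an rb_root means mapping lookups now go through the generic interval-tree helpers. A hedged sketch of insert and stabbing lookup against the new fields (the units of it.start/it.last are whatever the driver picks; shown here as page offsets):

#include <linux/interval_tree.h>

static void va_insert(struct radeon_vm *vm, struct radeon_bo_va *bo_va,
                      unsigned long first, unsigned long last)
{
        bo_va->it.start = first;
        bo_va->it.last = last;                  /* inclusive */
        interval_tree_insert(&bo_va->it, &vm->va);
}

static struct radeon_bo_va *va_lookup(struct radeon_vm *vm,
                                      unsigned long first, unsigned long last)
{
        struct interval_tree_node *it;

        it = interval_tree_iter_first(&vm->va, first, last);
        return it ? container_of(it, struct radeon_bo_va, it) : NULL;
}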
@@ -1740,6 +1755,7 @@ struct radeon_asic_ring {
/* command emit functions */
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1763,13 +1779,8 @@ struct radeon_asic {
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
int (*asic_reset)(struct radeon_device *rdev);
- /* ioctl hw specific callback. Some hw might want to perform special
- * operation on specific ioctl. For instance on wait idle some hw
- * might want to perform and HDP flush through MMIO as it seems that
- * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
- * through ring.
- */
- void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+ /* Flush the HDP cache via MMIO */
+ void (*mmio_hdp_flush)(struct radeon_device *rdev);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
@@ -1782,16 +1793,26 @@ struct radeon_asic {
struct {
void (*tlb_flush)(struct radeon_device *rdev);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
- void (*set_page)(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+ void (*copy_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+ void (*write_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*set_pages)(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+ void (*pad_ib)(struct radeon_ib *ib);
} vm;
/* ring specific callbacks */
struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2299,10 +2320,12 @@ struct radeon_device {
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
const struct firmware *mec_fw; /* CIK MEC firmware */
+ const struct firmware *mec2_fw; /* KV MEC2 firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
const struct firmware *uvd_fw; /* UVD firmware */
const struct firmware *vce_fw; /* VCE firmware */
+ bool new_fw;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2342,6 +2365,11 @@ struct radeon_device {
struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
+ u32 px_quirk_flags;
+
+ /* tracking pinned memory */
+ u64 vram_pin_size;
+ u64 gart_pin_size;
};
bool radeon_is_px(struct drm_device *dev);
@@ -2352,10 +2380,42 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
- bool always_indirect);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
- bool always_indirect);
+#define RADEON_MIN_MMIO_SIZE 0x10000
+
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+ bool always_indirect)
+{
+ /* The mmio size is 64kb at minimum, so for small constant offsets the if below is optimized out. */
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ return readl(((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+ uint32_t ret;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+ return ret;
+ }
+}
+
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+ bool always_indirect)
+{
+ if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+ writel(v, ((void __iomem *)rdev->rmmio) + reg);
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+ writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+ writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+ spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+ }
+}
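Registers past the directly mapped window go through the classic index/data pair, which is why the slow path takes a spinlock: the select and the transfer must not interleave between CPUs. A stand-alone model of the protocol (plain arrays stand in for the hardware):

#include <stdint.h>

static uint32_t regfile[1 << 16];       /* the full register space */
static uint32_t mm_index;               /* MM_INDEX latch */

/* A write to MM_INDEX selects the target; MM_DATA then transfers
 * against it. The driver's lock keeps these two steps paired. */
static void write_mm_index(uint32_t reg)  { mm_index = reg; }
static uint32_t read_mm_data(void)        { return regfile[mm_index / 4]; }
static void write_mm_data(uint32_t v)     { regfile[mm_index / 4] = v; }

static uint32_t indirect_read(uint32_t reg)
{
        write_mm_index(reg);
        return read_mm_data();
}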
+
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
@@ -2709,10 +2769,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
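The four macros replace the single set_page hook. A hedged sketch of the dispatch radeon_vm.c can now do over them (thresholds are illustrative): entries already present in the GART table are bulk-copied, scattered system pages are written one by one, and large contiguous VRAM runs use the generate packet, with pad_ib squaring off the IB afterwards.

        if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
                uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
                radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
        } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
                radeon_asic_vm_write_pages(rdev, ib, pe, addr, count,
                                           incr, flags);
        } else {
                radeon_asic_vm_set_pages(rdev, ib, pe, addr, count,
                                         incr, flags);
        }
        radeon_asic_vm_pad_ib(rdev, ib);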
@@ -2840,6 +2903,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 34b9aa9e3c06..eeeeabe09758 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -185,6 +185,7 @@ static struct radeon_asic_ring r100_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r100_asic = {
@@ -194,7 +195,7 @@ static struct radeon_asic r100_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
@@ -260,7 +261,7 @@ static struct radeon_asic r200_asic = {
.resume = &r100_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r100_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
@@ -331,6 +332,7 @@ static struct radeon_asic_ring r300_gfx_ring = {
.get_rptr = &r100_gfx_get_rptr,
.get_wptr = &r100_gfx_get_wptr,
.set_wptr = &r100_gfx_set_wptr,
+ .hdp_flush = &r100_ring_hdp_flush,
};
static struct radeon_asic r300_asic = {
@@ -340,7 +342,7 @@ static struct radeon_asic r300_asic = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -406,7 +408,7 @@ static struct radeon_asic r300_asic_pcie = {
.resume = &r300_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -472,7 +474,7 @@ static struct radeon_asic r420_asic = {
.resume = &r420_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
@@ -538,7 +540,7 @@ static struct radeon_asic rs400_asic = {
.resume = &rs400_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
@@ -604,7 +606,7 @@ static struct radeon_asic rs600_asic = {
.resume = &rs600_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
@@ -672,7 +674,7 @@ static struct radeon_asic rs690_asic = {
.resume = &rs690_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
@@ -740,7 +742,7 @@ static struct radeon_asic rv515_asic = {
.resume = &rv515_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
@@ -806,7 +808,7 @@ static struct radeon_asic r520_asic = {
.resume = &r520_resume,
.vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
@@ -898,7 +900,7 @@ static struct radeon_asic r600_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -970,7 +972,7 @@ static struct radeon_asic rv6xx_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1060,7 +1062,7 @@ static struct radeon_asic rs780_asic = {
.resume = &r600_resume,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1163,7 +1165,7 @@ static struct radeon_asic rv770_asic = {
.resume = &rv770_resume,
.asic_reset = &r600_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1281,7 +1283,7 @@ static struct radeon_asic evergreen_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1373,7 +1375,7 @@ static struct radeon_asic sumo_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1464,7 +1466,7 @@ static struct radeon_asic btc_asic = {
.resume = &evergreen_resume,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1599,7 +1601,7 @@ static struct radeon_asic cayman_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &rv770_get_xclk,
@@ -1611,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1699,7 +1704,7 @@ static struct radeon_asic trinity_asic = {
.resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &r600_get_xclk,
@@ -1711,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
- .set_page = &cayman_dma_vm_set_page,
+ .copy_pages = &cayman_dma_vm_copy_pages,
+ .write_pages = &cayman_dma_vm_write_pages,
+ .set_pages = &cayman_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1829,7 +1837,7 @@ static struct radeon_asic si_asic = {
.resume = &si_resume,
.asic_reset = &si_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
+ .mmio_hdp_flush = r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &si_get_xclk,
@@ -1841,7 +1849,10 @@ static struct radeon_asic si_asic = {
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
- .set_page = &si_dma_vm_set_page,
+ .copy_pages = &si_dma_vm_copy_pages,
+ .write_pages = &si_dma_vm_write_pages,
+ .set_pages = &si_dma_vm_set_pages,
+ .pad_ib = &cayman_dma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -1987,7 +1998,7 @@ static struct radeon_asic ci_asic = {
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
@@ -1999,7 +2010,10 @@ static struct radeon_asic ci_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2091,7 +2105,7 @@ static struct radeon_asic kv_asic = {
.resume = &cik_resume,
.asic_reset = &cik_asic_reset,
.vga_set_state = &r600_vga_set_state,
- .ioctl_wait_idle = NULL,
+ .mmio_hdp_flush = &r600_mmio_hdp_flush,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.get_xclk = &cik_get_xclk,
@@ -2103,7 +2117,10 @@ static struct radeon_asic kv_asic = {
.vm = {
.init = &cik_vm_init,
.fini = &cik_vm_fini,
- .set_page = &cik_sdma_vm_set_page,
+ .copy_pages = &cik_sdma_vm_copy_pages,
+ .write_pages = &cik_sdma_vm_write_pages,
+ .set_pages = &cik_sdma_vm_set_pages,
+ .pad_ib = &cik_sdma_vm_pad_ib,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2457,7 +2474,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2476,7 +2493,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CP_LS |
@@ -2502,7 +2519,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
@@ -2530,7 +2547,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->cg_flags =
RADEON_CG_SUPPORT_GFX_MGCG |
RADEON_CG_SUPPORT_GFX_MGLS |
- RADEON_CG_SUPPORT_GFX_CGCG |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
RADEON_CG_SUPPORT_GFX_CGLS |
RADEON_CG_SUPPORT_GFX_CGTS |
RADEON_CG_SUPPORT_GFX_CGTS_LS |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 01e7c0ad8f01..275a5dc01780 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -68,7 +68,7 @@ int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -148,7 +148,8 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
-
+void r100_ring_hdp_flush(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/*
* r200,rv250,rs300,rv280
*/
@@ -173,7 +174,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -209,7 +210,7 @@ extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -233,7 +234,7 @@ void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr);
+ uint64_t addr, uint32_t flags);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
@@ -351,7 +352,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
+extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
@@ -606,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -693,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -771,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags);
+
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 173f378428a9..92b2d8dd4735 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"asc7xxx",
};
@@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
- "lm64",
+ "lm63", /* lm64 */
"f75375",
"RV6xx",
"RV770",
@@ -3236,6 +3236,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
return 0;
}
+union get_voltage_info {
+ struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
+ struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
+};
+
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+ u16 virtual_voltage_id,
+ u16 *voltage)
+{
+ int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
+ u32 entry_id;
+ u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
+ union get_voltage_info args;
+
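+ /* find the sclk dependency entry whose virtual voltage id matches;
+ * its clock selects the row of the EVV table queried below
+ */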
+ for (entry_id = 0; entry_id < count; entry_id++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
+ virtual_voltage_id)
+ break;
+ }
+
+ if (entry_id >= count)
+ return -EINVAL;
+
+ args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
+ args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ args.in.ulSCLKFreq =
+ cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
+
+ return 0;
+}
+
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
@@ -3397,6 +3432,50 @@ radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
return false;
}
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ union voltage_object_info *voltage_info;
+ union voltage_object *voltage_object = NULL;
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ voltage_info = (union voltage_object_info *)
+ (rdev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 3:
+ switch (crev) {
+ case 1:
+ voltage_object = (union voltage_object *)
+ atom_lookup_voltage_object_v3(&voltage_info->v3,
+ voltage_type,
+ VOLTAGE_OBJ_SVID2);
+ if (voltage_object) {
+ *svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
+ *svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("unknown voltage object table\n");
+ return -EINVAL;
+ }
+
+ }
+ return 0;
+}
+
int radeon_atom_get_max_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *max_voltage)
{
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 6e05a2e75a46..69f5695bdab9 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -97,7 +97,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;
n = RADEON_BENCHMARK_ITERATIONS;
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -109,7 +109,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 44831197e82e..300c4b3d4669 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -107,7 +107,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -115,7 +115,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -124,7 +124,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -148,7 +148,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
}
- if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* hdmi deep color only implemented on DCE4+ */
if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
@@ -197,10 +197,19 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
connector->name, bpc);
}
}
+ else if (bpc > 8) {
+ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+ connector->name);
+ bpc = 8;
+ }
}
- if ((radeon_deep_color == 0) && (bpc > 8))
+ if ((radeon_deep_color == 0) && (bpc > 8)) {
+ DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
+ connector->name);
bpc = 8;
+ }
DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
@@ -216,7 +225,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
struct drm_encoder *best_encoder = NULL;
struct drm_encoder *encoder = NULL;
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
- struct drm_mode_object *obj;
bool connected;
int i;
@@ -226,14 +234,11 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev,
+ connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
@@ -249,7 +254,6 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
@@ -257,34 +261,134 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
if (encoder->encoder_type == encoder_type)
return encoder;
}
return NULL;
}
+struct edid *radeon_connector_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
+
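+ /* prefer the cached copy; otherwise duplicate the EDID property blob
+ * so callers always get a kfree()-able struct edid owned by the
+ * connector
+ */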
+ if (radeon_connector->edid) {
+ return radeon_connector->edid;
+ } else if (edid_blob) {
+ struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
+ if (edid)
+ radeon_connector->edid = edid;
+ }
+ return radeon_connector->edid;
+}
+
+static void radeon_connector_get_edid(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid)
+ return;
+
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.ddc_valid)
+ radeon_router_select_ddc_port(radeon_connector);
+
+ if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) &&
+ radeon_connector->ddc_bus->has_aux) {
+ radeon_connector->edid = drm_get_edid(connector,
+ &radeon_connector->ddc_bus->aux.ddc);
+ } else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+
+ if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
+ dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
+ radeon_connector->ddc_bus->has_aux)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->aux.ddc);
+ else if (radeon_connector->ddc_bus)
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ } else if (radeon_connector->ddc_bus) {
+ radeon_connector->edid = drm_get_edid(&radeon_connector->base,
+ &radeon_connector->ddc_bus->adapter);
+ }
+
+ if (!radeon_connector->edid) {
+ if (rdev->is_atom_bios) {
+ /* some laptops provide a hardcoded edid in rom for LCDs */
+ if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ } else {
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+ }
+ }
+}
+
+static void radeon_connector_free_edid(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (radeon_connector->edid) {
+ kfree(radeon_connector->edid);
+ radeon_connector->edid = NULL;
+ }
+}
+
+static int radeon_ddc_get_modes(struct drm_connector *connector)
+{
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ int ret;
+
+ if (radeon_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
+ ret = drm_add_edid_modes(connector, radeon_connector->edid);
+ drm_edid_to_eld(connector, radeon_connector->edid);
+ return ret;
+ }
+ drm_mode_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
/* pick the encoder ids */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
+static void radeon_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder = radeon_best_single_encoder(connector);
+ struct radeon_encoder *radeon_encoder;
+
+ if (encoder == NULL)
+ return;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
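+ /* the first probed mode is the preferred one; remember it as the
+ * encoder's native mode for scaling, or clear it if nothing was probed
+ */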
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode =
+ list_first_entry(&connector->probed_modes,
+ struct drm_display_mode, head);
+
+ radeon_encoder->native_mode = *preferred_mode;
+ } else {
+ radeon_encoder->native_mode.clock = 0;
+ }
+}
+
/*
* radeon_connector_analog_encoder_conflict_solve
* - search for other connectors sharing this encoder
@@ -585,6 +689,35 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_property_change_mode(&radeon_encoder->base);
}
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum radeon_rmx_type rmx_type;
+
+ if (connector->encoder)
+ radeon_encoder = to_radeon_encoder(connector->encoder);
+ else {
+ struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+ radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
+ }
+
+ switch (val) {
+ default:
+ case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
+ case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
+ case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ }
+ if (radeon_encoder->rmx_type == rmx_type)
+ return 0;
+
+ if ((rmx_type != DRM_MODE_SCALE_NONE) &&
+ (radeon_encoder->native_mode.clock == 0))
+ return 0;
+
+ radeon_encoder->rmx_type = rmx_type;
+
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+
return 0;
}
@@ -625,22 +758,20 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
static int radeon_lvds_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;
- if (radeon_connector->ddc_bus) {
- ret = radeon_ddc_get_modes(radeon_connector);
- if (ret > 0) {
- encoder = radeon_best_single_encoder(connector);
- if (encoder) {
- radeon_fixup_lvds_native_mode(encoder, connector);
- /* add scaled modes */
- radeon_add_common_modes(encoder, connector);
- }
- return ret;
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+ if (ret > 0) {
+ encoder = radeon_best_single_encoder(connector);
+ if (encoder) {
+ radeon_fixup_lvds_native_mode(encoder, connector);
+ /* add scaled modes */
+ radeon_add_common_modes(encoder, connector);
}
+ return ret;
}
encoder = radeon_best_single_encoder(connector);
@@ -715,16 +846,9 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
}
/* check for edid as well */
+ radeon_connector_get_edid(connector);
if (radeon_connector->edid)
ret = connector_status_connected;
- else {
- if (radeon_connector->ddc_bus) {
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- if (radeon_connector->edid)
- ret = connector_status_connected;
- }
- }
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
@@ -737,10 +861,9 @@ static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->edid)
- kfree(radeon_connector->edid);
+ radeon_connector_free_edid(connector);
kfree(radeon_connector->con_priv);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -797,10 +920,12 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
static int radeon_vga_get_modes(struct drm_connector *connector)
{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
return ret;
}
@@ -843,28 +968,26 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
+ }
}
} else {
@@ -999,15 +1122,6 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.set_property = radeon_connector_set_property,
};
-static int radeon_dvi_get_modes(struct drm_connector *connector)
-{
- struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- int ret;
-
- ret = radeon_ddc_get_modes(radeon_connector);
- return ret;
-}
-
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -1048,7 +1162,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder = NULL;
struct drm_encoder_helper_funcs *encoder_funcs;
- struct drm_mode_object *obj;
int i, r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
@@ -1066,18 +1179,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
- radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
+ radeon_connector_free_edid(connector);
+ radeon_connector_get_edid(connector);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
/* rs690 seems to have a problem with connectors not existing and always
* return a block of 0's. If we see this just stop polling on this output */
- if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
+ if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
+ radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
connector->name);
@@ -1087,18 +1198,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
broken_edid = true; /* defer use_digital to later */
}
} else {
- radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ radeon_connector->use_digital =
+ !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
- } else
+ } else {
ret = connector_status_connected;
-
+ }
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
@@ -1118,8 +1229,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
+ radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
}
}
@@ -1153,14 +1263,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev,
- connector->encoder_ids[i],
- DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev,
+ connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1225,19 +1332,16 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
-
if (radeon_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1252,13 +1356,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
 /* then check use digital */
/* pick the first one */
- if (enc_id) {
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
- }
+ if (enc_id)
+ return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
@@ -1291,7 +1390,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1310,7 +1409,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
}
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
- .get_modes = radeon_dvi_get_modes,
+ .get_modes = radeon_vga_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
@@ -1339,7 +1438,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
@@ -1350,7 +1450,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
}
if (ret > 0) {
@@ -1383,7 +1484,10 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
- ret = radeon_ddc_get_modes(radeon_connector);
+ radeon_connector_get_edid(connector);
+ ret = radeon_ddc_get_modes(connector);
+
+ radeon_get_native_mode(connector);
}
return ret;
@@ -1391,7 +1495,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1400,11 +1503,10 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
@@ -1419,9 +1521,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
return ENCODER_OBJECT_ID_NONE;
}
-bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
+static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
- struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
@@ -1431,11 +1532,10 @@ bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
- obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER);
- if (!obj)
+ encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
+ if (!encoder)
continue;
- encoder = obj_to_encoder(obj);
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1478,10 +1578,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
goto out;
}
- if (radeon_connector->edid) {
- kfree(radeon_connector->edid);
- radeon_connector->edid = NULL;
- }
+ radeon_connector_free_edid(connector);
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
@@ -1587,7 +1684,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return radeon_dp_mode_valid_helper(connector, mode);
} else {
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1747,6 +1844,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1768,6 +1868,10 @@ radeon_add_atom_connector(struct drm_device *dev,
0);
drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
@@ -1817,6 +1921,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@@ -1835,6 +1943,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
@@ -1868,17 +1980,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@@ -1918,17 +2031,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
- }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1965,18 +2079,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
+ drm_object_attach_property(&radeon_connector->base.base,
+ rdev->mode_info.dither_property,
+ RADEON_FMT_DITHER_DISABLE);
+ drm_object_attach_property(&radeon_connector->base.base,
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
- if (ASIC_IS_AVIVO(rdev)) {
- drm_object_attach_property(&radeon_connector->base.base,
- rdev->mode_info.dither_property,
- RADEON_FMT_DITHER_DISABLE);
-
- }
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;
@@ -2050,7 +2164,7 @@ radeon_add_atom_connector(struct drm_device *dev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
if (has_aux)
radeon_dp_aux_init(radeon_connector);
@@ -2211,5 +2325,5 @@ radeon_add_legacy_connector(struct drm_device *dev,
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ae763f60c8a0..ee712c199b25 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -500,7 +500,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
if (r)
return r;
}
- return 0;
+
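+ /* also scrub page table entries of mappings invalidated by BO moves */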
+ return radeon_vm_clear_invalids(rdev, vm);
}
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 697add2cd4e3..c8ea050c8fa4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -103,6 +103,31 @@ static const char radeon_family_name[][16] = {
"LAST",
};
+#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
+#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
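+/* DISABLE_PX: PX handling is broken on this board, run with the
+ * RADEON_IS_PX flag cleared. LONG_WAKEUP: the dGPU needs a longer
+ * d3 delay to wake up reliably.
+ */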
+
+struct radeon_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
+static struct radeon_px_quirk radeon_px_quirk_list[] = {
+ /* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
+ * https://bugzilla.kernel.org/show_bug.cgi?id=74551
+ */
+ { PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
+ * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
+ /* macbook pro 8.2 */
+ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
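+ /* all-zero terminator, the lookup below stops at chip_device == 0 */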
+ { 0, 0, 0, 0, 0 },
+};
+
bool radeon_is_px(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -112,6 +137,26 @@ bool radeon_is_px(struct drm_device *dev)
return false;
}
+static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
+{
+ struct radeon_px_quirk *p = radeon_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ rdev->px_quirk_flags = p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+
+ if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
+ rdev->flags &= ~RADEON_IS_PX;
+}
+
/**
* radeon_program_register_sequence - program an array of registers.
*
@@ -385,7 +430,8 @@ int radeon_wb_init(struct radeon_device *rdev)
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL,
+ &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -1077,7 +1123,19 @@ static void radeon_check_arguments(struct radeon_device *rdev)
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (radeon_vm_block_size < 9) {
+ if (radeon_vm_block_size == -1) {
+
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(radeon_vm_size) + 18;
+
+ /* Make sure the PD is 4K in size up to 8GB of address space.
+ Above that, split equally between PD and PTs */
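+ /* e.g. the default vm_size of 8 gives bits = 3 + 18 = 21: a 4K PD
+ * resolves 9 of those, leaving page tables of 21 - 9 = 12 bits */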
+ if (radeon_vm_size <= 8)
+ radeon_vm_block_size = bits - 9;
+ else
+ radeon_vm_block_size = (bits + 3) / 2;
+
+ } else if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
@@ -1092,25 +1150,6 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
/**
- * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
- * needed for waking up.
- *
- * @pdev: pci dev pointer
- */
-static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
-{
-
- /* 6600m in a macbook pro */
- if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
- pdev->subsystem_device == 0x00e2) {
- printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
- return true;
- }
-
- return false;
-}
-
-/**
* radeon_switcheroo_set_state - set switcheroo state
*
* @pdev: pci dev pointer
@@ -1122,6 +1161,7 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct radeon_device *rdev = dev->dev_private;
if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
@@ -1133,7 +1173,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
+ if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
dev->pdev->d3_delay = 20;
radeon_resume_kms(dev, true, true);
@@ -1337,6 +1377,9 @@ int radeon_device_init(struct radeon_device *rdev,
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
+ if (rdev->flags & RADEON_IS_PX)
+ radeon_device_handle_px_quirks(rdev);
+
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bf25061c8ac4..3fdf87318069 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -293,6 +293,18 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
if (radeon_crtc == NULL)
return;
+ /* Skip the pageflip completion check below (based on polling) on
+ * asics which reliably support hw pageflip completion irqs. pflip
+ * irqs are a reliable and race-free method of handling pageflip
+ * completion detection. A use_pflipirq module parameter < 2 allows
+ * overriding this for asics with faulty pflip irqs. A value of 0 uses
+ * only this polling-based path; a value of 1 uses the pflip irq only
+ * as a backup to this path, as in Linux 3.16.
+ */
+ if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
+ return;
+
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
@@ -823,64 +835,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
return ret;
}
-int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
-{
- struct drm_device *dev = radeon_connector->base.dev;
- struct radeon_device *rdev = dev->dev_private;
- int ret = 0;
-
- /* don't leak the edid if we already fetched it in detect() */
- if (radeon_connector->edid)
- goto got_edid;
-
- /* on hw with routers, select right port */
- if (radeon_connector->router.ddc_valid)
- radeon_router_select_ddc_port(radeon_connector);
-
- if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
- ENCODER_OBJECT_ID_NONE) {
- if (radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
- struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
-
- if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
- dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
- radeon_connector->ddc_bus->has_aux)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->aux.ddc);
- else if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- } else {
- if (radeon_connector->ddc_bus && !radeon_connector->edid)
- radeon_connector->edid = drm_get_edid(&radeon_connector->base,
- &radeon_connector->ddc_bus->adapter);
- }
-
- if (!radeon_connector->edid) {
- if (rdev->is_atom_bios) {
- /* some laptops provide a hardcoded edid in rom for LCDs */
- if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- } else
- /* some servers provide a hardcoded edid in rom for KVMs */
- radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
- }
- if (radeon_connector->edid) {
-got_edid:
- drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
- ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
- drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
- return ret;
- }
- drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
- return 0;
-}
-
/* avivo */
/**
@@ -1749,7 +1703,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 959f0866d993..092d067f93e1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -82,9 +82,11 @@
* 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
* 2.39.0 - Add INFO query for number of active CUs
+ * 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
+ * CS to GPU
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 39
+#define KMS_DRIVER_MINOR 40
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -174,9 +176,10 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4;
-int radeon_vm_block_size = 9;
+int radeon_vm_size = 8;
+int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
+int radeon_use_pflipirq = 2;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -247,12 +250,15 @@ module_param_named(hard_reset, radeon_hard_reset, int, 0444);
-MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
-MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, radeon_deep_color, int, 0444);
+MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
+module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
+
static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bd4959ca23aa..3c2094c25b53 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -343,7 +343,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
@@ -365,7 +365,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 665ced3b7313..94b0f2aa3d7c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -127,8 +127,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
- false, true,
- &gobj);
+ 0, true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
@@ -331,7 +330,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
return 0;
}
-static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
.fb_probe = radeonfb_create,
@@ -353,7 +352,9 @@ int radeon_fbdev_init(struct radeon_device *rdev)
rfbdev->rdev = rdev;
rdev->mode_info.rfbdev = rfbdev;
- rfbdev->helper.funcs = &radeon_fb_helper_funcs;
+
+ drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
+ &radeon_fb_helper_funcs);
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
rdev->num_crtc,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2e723651069b..a053a0779aac 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->gart.robj);
+ 0, NULL, &rdev->gart.robj);
if (r) {
return r;
}
@@ -243,7 +243,8 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base,
+ RADEON_GART_PAGE_DUMMY);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
@@ -261,13 +262,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: RADEON_GART_PAGE_* flags
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist, dma_addr_t *dma_addr)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr,
+ uint32_t flags)
{
unsigned t;
unsigned p;
@@ -287,7 +290,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
+ radeon_gart_set_page(rdev, t, page_base, flags);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
@@ -298,33 +301,6 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
}
/**
- * radeon_gart_restore - bind all pages in the gart page table
- *
- * @rdev: radeon_device pointer
- *
- * Binds all pages in the gart page table (all asics).
- * Used to rebuild the gart table on device startup or resume.
- */
-void radeon_gart_restore(struct radeon_device *rdev)
-{
- int i, j, t;
- u64 page_base;
-
- if (!rdev->gart.ptr) {
- return;
- }
- for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
- page_base = rdev->gart.pages_addr[i];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base);
- page_base += RADEON_GPU_PAGE_SIZE;
- }
- }
- mb();
- radeon_gart_tlb_flush(rdev);
-}
-
-/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d09650c1d720..bfd7e1b0ff3f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -40,9 +40,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
}
}
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
- bool discardable, bool kernel,
+ u32 flags, bool kernel,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
@@ -55,23 +55,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
alignment = PAGE_SIZE;
}
- /* maximun bo size is the minimun btw visible vram and gtt size */
- max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ /* Maximum bo size is the unpinned gtt size since we use the gtt to
+ * handle vram to system pool migrations.
+ */
+ max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
if (size > max_size) {
- printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
- __func__, __LINE__, size >> 20, max_size >> 20);
+ DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
+ size >> 20, max_size >> 20);
return -ENOMEM;
}
retry:
- r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
+ flags, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
- DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
@@ -208,18 +211,15 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
- unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
- if (rdev->stollen_vga_memory)
- args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
- args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
- for(i = 0; i < RADEON_NUM_RINGS; ++i)
- args->gart_size -= rdev->ring[i].ring_size;
+ args->vram_visible -= rdev->vram_pin_size;
+ args->gart_size = rdev->mc.gtt_size;
+ args->gart_size -= rdev->gart_pin_size;
+
return 0;
}
@@ -252,8 +252,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
- args->initial_domain, false,
- false, &gobj);
+ args->initial_domain, args->flags,
+ false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@@ -358,16 +358,18 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
+ uint32_t cur_placement = 0;
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
- r = radeon_bo_wait(robj, NULL, false);
- /* callback hw specific functions if any */
- if (rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(rdev, robj);
+ r = radeon_bo_wait(robj, &cur_placement, false);
+ /* Flush HDP cache via MMIO if necessary */
+ if (rdev->asic->mmio_hdp_flush &&
+ radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+ robj->rdev->asic->mmio_hdp_flush(rdev);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
@@ -461,11 +463,6 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
}
- if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
- dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
- args->operation = RADEON_VA_RESULT_ERROR;
- return -EINVAL;
- }
switch (args->operation) {
case RADEON_VA_MAP:
@@ -499,9 +496,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case RADEON_VA_MAP:
- if (bo_va->soffset) {
+ if (bo_va->it.start) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
- args->offset = bo_va->soffset;
+ args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -572,9 +569,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
args->size = ALIGN(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, 0,
- RADEON_GEM_DOMAIN_VRAM,
- false, ttm_bo_type_device,
- &gobj);
+ RADEON_GEM_DOMAIN_VRAM, 0,
+ false, &gobj);
if (r)
return -ENOMEM;
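
The radeon_gem.c hunks above change two allocation policies: the size cap is now the unpinned GTT size (so any buffer can still be migrated through GTT), and a failed VRAM allocation falls back to GTT instead of failing hard. A minimal sketch of the fallback pattern, with a hypothetical helper name:

static int radeon_alloc_with_fallback(struct radeon_device *rdev,
				      unsigned long size, int align,
				      u32 initial_domain, u32 flags,
				      struct radeon_bo **robj)
{
	int r;

retry:
	r = radeon_bo_create(rdev, size, align, false, initial_domain,
			     flags, NULL, robj);
	if (r && r != -ERESTARTSYS &&
	    initial_domain == RADEON_GEM_DOMAIN_VRAM) {
		/* VRAM is full or fragmented; allow GTT and try again */
		initial_domain |= RADEON_GEM_DOMAIN_GTT;
		goto retry;
	}
	return r;
}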
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
new file mode 100644
index 000000000000..65b0c213488d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ * Christian König
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+
+/*
+ * IB
+ * IBs (Indirect Buffers) are areas of GPU accessible memory where
+ * commands are stored. You can put a pointer to the IB in the
+ * command ring and the hw will fetch the commands from the IB
+ * and execute them. Generally userspace acceleration drivers
+ * produce command buffers which are sent to the kernel and
+ * put in IBs for execution by the requested ring.
+ */
+static int radeon_debugfs_sa_init(struct radeon_device *rdev);
+
+/**
+ * radeon_ib_get - request an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the IB is associated with
+ * @ib: IB object returned
+ * @size: requested IB size
+ *
+ * Request an IB (all asics). IBs are allocated using the
+ * suballocator.
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib *ib, struct radeon_vm *vm,
+ unsigned size)
+{
+ int r;
+
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
+ return r;
+ }
+
+ r = radeon_semaphore_create(rdev, &ib->semaphore);
+ if (r) {
+ return r;
+ }
+
+ ib->ring = ring;
+ ib->fence = NULL;
+ ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
+ ib->vm = vm;
+ if (vm) {
+ /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
+ * space and soffset is the offset inside the pool bo
+ */
+ ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ } else {
+ ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
+ }
+ ib->is_const_ib = false;
+
+ return 0;
+}
+
+/**
+ * radeon_ib_free - free an IB (Indirect Buffer)
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to free
+ *
+ * Free an IB (all asics).
+ */
+void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_fence_unref(&ib->fence);
+}
+
+/**
+ * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ * @const_ib: Const IB to schedule (SI only)
+ *
+ * Schedule an IB on the associated ring (all asics).
+ * Returns 0 on success, error on failure.
+ *
+ * On SI, there are two parallel engines fed from the primary ring,
+ * the CE (Constant Engine) and the DE (Drawing Engine). Since
+ * resource descriptors have moved to memory, the CE allows you to
+ * prime the caches while the DE is updating register state so that
+ * the resource descriptors will be already in cache when the draw is
+ * processed. To accomplish this, the userspace driver submits two
+ * IBs, one for the CE and one for the DE. If there is a CE IB (called
+ * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
+ * to SI there was just a DE IB.
+ */
+int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
+ struct radeon_ib *const_ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ int r = 0;
+
+ if (!ib->length_dw || !ring->ready) {
+ /* TODO: nothing in the IB that we should report. */
+ dev_err(rdev->dev, "couldn't schedule ib\n");
+ return -EINVAL;
+ }
+
+ /* 64 dwords should be enough for fence too */
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
+ if (r) {
+ dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
+ return r;
+ }
+
+ /* grab a vm id if necessary */
+ if (ib->vm) {
+ struct radeon_fence *vm_id_fence;
+ vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
+ radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
+ }
+
+ /* sync with other rings */
+ r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ if (ib->vm)
+ radeon_vm_flush(rdev, ib->vm, ib->ring);
+
+ if (const_ib) {
+ radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
+ radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
+ }
+ radeon_ring_ib_execute(rdev, ib->ring, ib);
+ r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
+ if (r) {
+ dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+ if (const_ib) {
+ const_ib->fence = radeon_fence_ref(ib->fence);
+ }
+
+ if (ib->vm)
+ radeon_vm_fence(rdev, ib->vm, ib->fence);
+
+ radeon_ring_unlock_commit(rdev, ring);
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the suballocator to manage a pool of memory
+ * for use as IBs (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int radeon_ib_pool_init(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->ib_pool_ready) {
+ return 0;
+ }
+
+ if (rdev->family >= CHIP_BONAIRE) {
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT,
+ RADEON_GEM_GTT_WC);
+ } else {
+ /* Before CIK, it's better to stick to cacheable GTT due
+ * to the command stream checking
+ */
+ r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GPU_PAGE_SIZE,
+ RADEON_GEM_DOMAIN_GTT, 0);
+ }
+ if (r) {
+ return r;
+ }
+
+ r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
+ if (r) {
+ return r;
+ }
+
+ rdev->ib_pool_ready = true;
+ if (radeon_debugfs_sa_init(rdev)) {
+ dev_err(rdev->dev, "failed to register debugfs file for SA\n");
+ }
+ return 0;
+}
+
+/**
+ * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the suballocator managing the pool of memory
+ * for use as IBs (all asics).
+ */
+void radeon_ib_pool_fini(struct radeon_device *rdev)
+{
+ if (rdev->ib_pool_ready) {
+ radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
+ rdev->ib_pool_ready = false;
+ }
+}
+
+/**
+ * radeon_ib_ring_tests - test IBs on the rings
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Test an IB (Indirect Buffer) on each ring.
+ * If the test fails, disable the ring.
+ * Returns 0 on success, error if the primary GFX ring
+ * IB test fails.
+ */
+int radeon_ib_ring_tests(struct radeon_device *rdev)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (!ring->ready)
+ continue;
+
+ r = radeon_ib_test(rdev, i, ring);
+ if (r) {
+ ring->ready = false;
+ rdev->needs_reset = false;
+
+ if (i == RADEON_RING_TYPE_GFX_INDEX) {
+ /* oh, oh, that's really bad */
+ DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+
+ } else {
+ /* still not good, but we can live with it */
+ DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
+
+ return 0;
+
+}
+
+static struct drm_info_list radeon_debugfs_sa_list[] = {
+ {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
+};
+
+#endif
+
+static int radeon_debugfs_sa_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
+#else
+ return 0;
+#endif
+}
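
Taken together, the new file exposes a small lifecycle: radeon_ib_get() carves an IB out of the suballocator, the caller fills ib->ptr and ib->length_dw, radeon_ib_schedule() emits the IB plus a fence, and radeon_ib_free() returns the memory against that fence. A hedged in-kernel usage sketch (the packet payload and helper name are illustrative):

static int radeon_submit_test_ib(struct radeon_device *rdev, int ring)
{
	struct radeon_ib ib;
	int r;

	/* 64-byte IB on the requested ring, no VM attached */
	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = PACKET2(0);		/* illustrative no-op packet */
	ib.length_dw = 1;

	r = radeon_ib_schedule(rdev, &ib, NULL);	/* no const IB */
	if (!r)
		r = radeon_fence_wait(ib.fence, false);

	radeon_ib_free(rdev, &ib);	/* frees against ib.fence */
	return r;
}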
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d25ae6acfd5a..eb7164d07985 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -254,7 +254,18 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
break;
case RADEON_INFO_ACCEL_WORKING2:
- *value = rdev->accel_working;
+ if (rdev->family == CHIP_HAWAII) {
+ if (rdev->accel_working) {
+ if (rdev->new_fw)
+ *value = 3;
+ else
+ *value = 2;
+ } else {
+ *value = 0;
+ }
+ } else {
+ *value = rdev->accel_working;
+ }
break;
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_BONAIRE)
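
The ACCEL_WORKING2 change turns a boolean into a tri-state on Hawaii: 0 means no acceleration, 2 means acceleration with the legacy firmware layout, and 3 means acceleration with the new validated-header firmware. A userspace query sketch, assuming only the uapi structures:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

static int radeon_accel_working2(int fd, uint32_t *value)
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_ACCEL_WORKING2;
	info.value = (uintptr_t)value;	/* kernel writes through this */

	return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
}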
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 0592ddb0904b..e27608c29c11 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -685,10 +685,11 @@ extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
-extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
+extern struct edid *radeon_connector_edid(struct drm_connector *connector);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
@@ -738,7 +739,6 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
-extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6c717b257d6d..480c87d8edc5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,16 +46,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
-static void radeon_bo_clear_va(struct radeon_bo *bo)
-{
- struct radeon_bo_va *bo_va, *tmp;
-
- list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
- /* remove from all vm address space */
- radeon_vm_bo_rmv(bo->rdev, bo_va);
- }
-}
-
static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
@@ -90,7 +80,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
- radeon_bo_clear_va(bo);
+ WARN_ON(!list_empty(&bo->va));
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -114,15 +104,23 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (domain & RADEON_GEM_DOMAIN_GTT) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ (rbo->rdev->flags & RADEON_IS_AGP)) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_TT;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
}
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
- if (rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
+ if (rbo->flags & RADEON_GEM_GTT_UC) {
+ rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
+ } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
+ rbo->rdev->flags & RADEON_IS_AGP) {
+ rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+ TTM_PL_FLAG_SYSTEM;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
}
@@ -146,7 +144,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
- struct sg_table *sg, struct radeon_bo **bo_ptr)
+ u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@@ -183,6 +181,12 @@ int radeon_bo_create(struct radeon_device *rdev,
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT |
RADEON_GEM_DOMAIN_CPU);
+
+ bo->flags = flags;
+ /* PCI GART is always snooped */
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
@@ -232,6 +236,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
ttm_bo_kunmap(&bo->kmap);
}
+struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
+{
+ if (bo == NULL)
+ return NULL;
+
+ ttm_bo_reference(&bo->tbo);
+ return bo;
+}
+
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
@@ -241,9 +254,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
- up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
@@ -292,9 +303,13 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
- }
- if (unlikely(r != 0))
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ bo->rdev->vram_pin_size += radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size += radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p pin failed\n", bo);
+ }
return r;
}
@@ -317,8 +332,14 @@ int radeon_bo_unpin(struct radeon_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r != 0))
+ if (likely(r == 0)) {
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+ bo->rdev->vram_pin_size -= radeon_bo_size(bo);
+ else
+ bo->rdev->gart_pin_size -= radeon_bo_size(bo);
+ } else {
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
+ }
return r;
}
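
The placement changes above are the consumer of the new per-BO flags: RADEON_GEM_GTT_UC forces uncached GTT, RADEON_GEM_GTT_WC (or AGP) selects write-combined, and the default stays cached, i.e. snooped. A simplified restatement of the GTT branch:

static u32 radeon_gtt_placement(u32 bo_flags, bool is_agp)
{
	if (bo_flags & RADEON_GEM_GTT_UC)
		return TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;

	if ((bo_flags & RADEON_GEM_GTT_WC) || is_agp)
		return TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
		       TTM_PL_FLAG_TT;

	return TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;	/* snooped default */
}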
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 5a873f31a171..98a47fdf3625 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -124,11 +124,12 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
- bool kernel, u32 domain,
+ bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
@@ -170,7 +171,8 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain);
+ unsigned size, u32 align, u32 domain,
+ u32 flags);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index e447e390d09a..23314be49480 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1303,10 +1303,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_RV770:
- case CHIP_BARTS:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- case CHIP_CAYMAN:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@@ -1330,6 +1326,10 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
+ case CHIP_BARTS:
+ case CHIP_TURKS:
+ case CHIP_CAICOS:
+ case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -1400,9 +1400,7 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
}
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@@ -1421,9 +1419,7 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
radeon_dpm_fini(rdev);
radeon_hwmon_fini(rdev);
-
- if (rdev->pm.power_state)
- kfree(rdev->pm.power_state);
+ kfree(rdev->pm.power_state);
}
void radeon_pm_fini(struct radeon_device *rdev)
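
The two radeon_pm cleanups rely on kfree() being defined as a no-op for a NULL pointer, so the guard adds nothing:

	if (rdev->pm.power_state)	/* redundant check ... */
		kfree(rdev->pm.power_state);

	kfree(rdev->pm.power_state);	/* ... kfree(NULL) is a no-op */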
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 28d71070c389..0b16f2cbcf17 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -65,7 +65,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
int ret;
ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
- RADEON_GEM_DOMAIN_GTT, sg, &bo);
+ RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f8050f5429e2..5b4e0cf231a0 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,258 +26,8 @@
* Jerome Glisse
* Christian König
*/
-#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon_reg.h"
#include "radeon.h"
-#include "atom.h"
-
-/*
- * IB
- * IBs (Indirect Buffers) and areas of GPU accessible memory where
- * commands are stored. You can put a pointer to the IB in the
- * command ring and the hw will fetch the commands from the IB
- * and execute them. Generally userspace acceleration drivers
- * produce command buffers which are send to the kernel and
- * put in IBs for execution by the requested ring.
- */
-static int radeon_debugfs_sa_init(struct radeon_device *rdev);
-
-/**
- * radeon_ib_get - request an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ring: ring index the IB is associated with
- * @ib: IB object returned
- * @size: requested IB size
- *
- * Request an IB (all asics). IBs are allocated using the
- * suballocator.
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_get(struct radeon_device *rdev, int ring,
- struct radeon_ib *ib, struct radeon_vm *vm,
- unsigned size)
-{
- int r;
-
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
- if (r) {
- dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
- return r;
- }
-
- r = radeon_semaphore_create(rdev, &ib->semaphore);
- if (r) {
- return r;
- }
-
- ib->ring = ring;
- ib->fence = NULL;
- ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
- ib->vm = vm;
- if (vm) {
- /* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
- * space and soffset is the offset inside the pool bo
- */
- ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
- } else {
- ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
- }
- ib->is_const_ib = false;
-
- return 0;
-}
-
-/**
- * radeon_ib_free - free an IB (Indirect Buffer)
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to free
- *
- * Free an IB (all asics).
- */
-void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
- radeon_fence_unref(&ib->fence);
-}
-
-/**
- * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- * @const_ib: Const IB to schedule (SI only)
- *
- * Schedule an IB on the associated ring (all asics).
- * Returns 0 on success, error on failure.
- *
- * On SI, there are two parallel engines fed from the primary ring,
- * the CE (Constant Engine) and the DE (Drawing Engine). Since
- * resource descriptors have moved to memory, the CE allows you to
- * prime the caches while the DE is updating register state so that
- * the resource descriptors will be already in cache when the draw is
- * processed. To accomplish this, the userspace driver submits two
- * IBs, one for the CE and one for the DE. If there is a CE IB (called
- * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
- * to SI there was just a DE IB.
- */
-int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
- struct radeon_ib *const_ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
- int r = 0;
-
- if (!ib->length_dw || !ring->ready) {
- /* TODO: Nothings in the ib we should report. */
- dev_err(rdev->dev, "couldn't schedule ib\n");
- return -EINVAL;
- }
-
- /* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
- if (r) {
- dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
- return r;
- }
-
- /* grab a vm id if necessary */
- if (ib->vm) {
- struct radeon_fence *vm_id_fence;
- vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
- radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
- }
-
- /* sync with other rings */
- r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- if (ib->vm)
- radeon_vm_flush(rdev, ib->vm, ib->ring);
-
- if (const_ib) {
- radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
- radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
- }
- radeon_ring_ib_execute(rdev, ib->ring, ib);
- r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
- if (r) {
- dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
- if (const_ib) {
- const_ib->fence = radeon_fence_ref(ib->fence);
- }
-
- if (ib->vm)
- radeon_vm_fence(rdev, ib->vm, ib->fence);
-
- radeon_ring_unlock_commit(rdev, ring);
- return 0;
-}
-
-/**
- * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Initialize the suballocator to manage a pool of memory
- * for use as IBs (all asics).
- * Returns 0 on success, error on failure.
- */
-int radeon_ib_pool_init(struct radeon_device *rdev)
-{
- int r;
-
- if (rdev->ib_pool_ready) {
- return 0;
- }
- r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
- RADEON_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
-
- r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
- if (r) {
- return r;
- }
-
- rdev->ib_pool_ready = true;
- if (radeon_debugfs_sa_init(rdev)) {
- dev_err(rdev->dev, "failed to register debugfs file for SA\n");
- }
- return 0;
-}
-
-/**
- * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
- *
- * @rdev: radeon_device pointer
- *
- * Tear down the suballocator managing the pool of memory
- * for use as IBs (all asics).
- */
-void radeon_ib_pool_fini(struct radeon_device *rdev)
-{
- if (rdev->ib_pool_ready) {
- radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
- radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
- rdev->ib_pool_ready = false;
- }
-}
-
-/**
- * radeon_ib_ring_tests - test IBs on the rings
- *
- * @rdev: radeon_device pointer
- *
- * Test an IB (Indirect Buffer) on each ring.
- * If the test fails, disable the ring.
- * Returns 0 on success, error if the primary GFX ring
- * IB test fails.
- */
-int radeon_ib_ring_tests(struct radeon_device *rdev)
-{
- unsigned i;
- int r;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_ring *ring = &rdev->ring[i];
-
- if (!ring->ready)
- continue;
-
- r = radeon_ib_test(rdev, i, ring);
- if (r) {
- ring->ready = false;
- rdev->needs_reset = false;
-
- if (i == RADEON_RING_TYPE_GFX_INDEX) {
- /* oh, oh, that's really bad */
- DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
- rdev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
- }
- }
- }
- return 0;
-}
/*
* Rings
@@ -433,11 +183,21 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
+ /* If we are emitting the HDP flush via the ring buffer, we need to
+ * do it before padding.
+ */
+ if (rdev->asic->ring[ring->idx]->hdp_flush)
+ rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
mb();
+ /* If we are emitting the HDP flush via MMIO, we need to do it after
+ * all CPU writes to VRAM finished.
+ */
+ if (rdev->asic->mmio_hdp_flush)
+ rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}
@@ -641,6 +401,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
+ (rdev->flags & RADEON_IS_PCIE) ?
+ RADEON_GEM_GTT_WC : 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
@@ -791,22 +553,6 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};
-static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
-
- return 0;
-
-}
-
-static struct drm_info_list radeon_debugfs_sa_list[] = {
- {"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
-};
-
#endif
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
@@ -828,12 +574,3 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri
#endif
return 0;
}
-
-static int radeon_debugfs_sa_init(struct radeon_device *rdev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
-#else
- return 0;
-#endif
-}
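
The reworked radeon_ring_commit() interleaves the two HDP flush styles around the existing padding and memory barrier; a commented restatement of the ordering it enforces:

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* 1. in-band flush must land inside the command stream,
	 *    i.e. before the NOP padding */
	if (rdev->asic->ring[ring->idx]->hdp_flush)
		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);

	/* 2. pad to the fetch size */
	while (ring->wptr & ring->align_mask)
		radeon_ring_write(ring, ring->nop);

	/* 3. order all CPU writes to the ring before the doorbell */
	mb();

	/* 4. out-of-band flush must follow every CPU write to VRAM */
	if (rdev->asic->mmio_hdp_flush)
		rdev->asic->mmio_hdp_flush(rdev);

	/* 5. kick the GPU */
	radeon_ring_set_wptr(rdev, ring);
}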
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index adcf3e2f07da..b84f97c8718c 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned size, u32 align, u32 domain, u32 flags)
{
int i, r;
@@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}
r = radeon_bo_create(rdev, size, align, true,
- domain, NULL, &sa_manager->bo);
+ domain, flags, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 3a13e0d1055c..5adf4207453d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -56,13 +56,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- n -= rdev->ring[i].ring_size;
- if (rdev->wb.wb_obj)
- n -= RADEON_GPU_PAGE_SIZE;
- if (rdev->ih.ring_obj)
- n -= rdev->ih.ring_size;
+ n = rdev->mc.gtt_size - rdev->gart_pin_size;
n /= size;
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@@ -73,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
- NULL, &vram_obj);
+ 0, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -93,7 +87,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
struct radeon_fence *fence = NULL;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
+ RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index f749f2c3bbdb..9db74a96ef61 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -72,8 +72,8 @@ TRACE_EVENT(radeon_vm_bo_update,
),
TP_fast_assign(
- __entry->soffset = bo_va->soffset;
- __entry->eoffset = bo_va->eoffset;
+ __entry->soffset = bo_va->it.start;
+ __entry->eoffset = bo_va->it.last + 1;
__entry->flags = bo_va->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
@@ -104,6 +104,24 @@ TRACE_EVENT(radeon_vm_set_page,
__entry->flags, __entry->count)
);
+TRACE_EVENT(radeon_vm_flush,
+ TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
+ TP_ARGS(pd_addr, ring, id),
+ TP_STRUCT__entry(
+ __field(u64, pd_addr)
+ __field(u32, ring)
+ __field(u32, id)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_addr = pd_addr;
+ __entry->ring = ring;
+ __entry->id = id;
+ ),
+ TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
+ __entry->pd_addr, __entry->ring, __entry->id)
+);
+
DECLARE_EVENT_CLASS(radeon_fence_request,
TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index c8a8a5144ec1..72afe82a95c9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -521,6 +521,8 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
+ uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
+ RADEON_GART_PAGE_WRITE;
int r;
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
@@ -528,8 +530,10 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (ttm->caching_state == tt_cached)
+ flags |= RADEON_GART_PAGE_SNOOP;
+ r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
@@ -726,7 +730,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
+ RADEON_GEM_DOMAIN_VRAM, 0,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
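
The TTM bind path now builds per-page GART flags instead of leaving the policy to radeon_gart_bind(): every page is valid, readable and writable, and SNOOP is added only for cache-coherent pages. Extracted as a sketch:

static uint32_t radeon_gart_flags_for(const struct ttm_tt *ttm)
{
	uint32_t flags = RADEON_GART_PAGE_VALID |
			 RADEON_GART_PAGE_READ |
			 RADEON_GART_PAGE_WRITE;

	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;	/* coherent pages only */

	return flags;
}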
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.c b/drivers/gpu/drm/radeon/radeon_ucode.c
new file mode 100644
index 000000000000..6beec680390c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ucode.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_ucode.h"
+
+static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr)
+{
+ DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
+ DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
+ DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
+ DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
+ DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
+ DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
+ DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
+ DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
+ DRM_DEBUG("ucode_array_offset_bytes: %u\n",
+ le32_to_cpu(hdr->ucode_array_offset_bytes));
+ DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
+}
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("MC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct mc_firmware_header_v1_0 *mc_hdr =
+ container_of(hdr, struct mc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("io_debug_size_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_size_bytes));
+ DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
+ le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
+ } else {
+ DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SMC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct smc_firmware_header_v1_0 *smc_hdr =
+ container_of(hdr, struct smc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
+ } else {
+ DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("GFX\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct gfx_firmware_header_v1_0 *gfx_hdr =
+ container_of(hdr, struct gfx_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(gfx_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("RLC\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct rlc_firmware_header_v1_0 *rlc_hdr =
+ container_of(hdr, struct rlc_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("master_pkt_description_offset: %u\n",
+ le32_to_cpu(rlc_hdr->master_pkt_description_offset));
+ } else {
+ DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("SDMA\n");
+ radeon_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct sdma_firmware_header_v1_0 *sdma_hdr =
+ container_of(hdr, struct sdma_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_feature_version));
+ DRM_DEBUG("ucode_change_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_change_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
+ } else {
+ DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
+ version_major, version_minor);
+ }
+}
+
+int radeon_ucode_validate(const struct firmware *fw)
+{
+ const struct common_firmware_header *hdr =
+ (const struct common_firmware_header *)fw->data;
+
+ if (fw->size == le32_to_cpu(hdr->size_bytes))
+ return 0;
+
+ return -EINVAL;
+}
+
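
radeon_ucode_validate() only checks that the blob length matches the size recorded in the header, which is enough to tell the new headered format from a legacy firmware image. A hedged usage sketch; the firmware file name and fallback path are illustrative, though rdev->new_fw is the field consulted by the Hawaii query above:

	r = request_firmware(&rdev->mc_fw, "radeon/example_mc.bin", rdev->dev);
	if (r)
		return r;

	if (radeon_ucode_validate(rdev->mc_fw) == 0) {
		rdev->new_fw = true;	/* headered format */
		radeon_ucode_print_mc_hdr((const struct common_firmware_header *)
					  rdev->mc_fw->data);
	} else {
		/* legacy, headerless blob: fall back to the old loader */
		rdev->new_fw = false;
	}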
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index 4e7c3269b183..dc4576e4d8ad 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -153,4 +153,75 @@
#define HAWAII_SMC_UCODE_START 0x20000
#define HAWAII_SMC_UCODE_SIZE 0x1FDEC
+struct common_firmware_header {
+ uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
+ uint32_t header_size_bytes; /* size of just the header in bytes */
+ uint16_t header_version_major; /* header version */
+ uint16_t header_version_minor; /* header version */
+ uint16_t ip_version_major; /* IP version */
+ uint16_t ip_version_minor; /* IP version */
+ uint32_t ucode_version;
+ uint32_t ucode_size_bytes; /* size of ucode in bytes */
+ uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
+ uint32_t crc32; /* crc32 checksum of the payload */
+};
+
+/* version_major=1, version_minor=0 */
+struct mc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t io_debug_size_bytes; /* size of debug array in dwords */
+ uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
+};
+
+/* version_major=1, version_minor=0 */
+struct smc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_start_addr;
+};
+
+/* version_major=1, version_minor=0 */
+struct gfx_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct rlc_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t save_and_restore_offset;
+ uint32_t clear_state_descriptor_offset;
+ uint32_t avail_scratch_ram_locations;
+ uint32_t master_pkt_description_offset;
+};
+
+/* version_major=1, version_minor=0 */
+struct sdma_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ucode_change_version;
+ uint32_t jt_offset; /* jt location */
+ uint32_t jt_size; /* size of jt */
+};
+
+/* header is fixed size */
+union radeon_firmware_header {
+ struct common_firmware_header common;
+ struct mc_firmware_header_v1_0 mc;
+ struct smc_firmware_header_v1_0 smc;
+ struct gfx_firmware_header_v1_0 gfx;
+ struct rlc_firmware_header_v1_0 rlc;
+ struct sdma_firmware_header_v1_0 sdma;
+ uint8_t raw[0x100];
+};
+
+void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
+void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
+int radeon_ucode_validate(const struct firmware *fw);
+
#endif
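
All multi-byte header fields are little-endian on disk, and the payload sits at ucode_array_offset_bytes from the start of the blob; a minimal accessor sketch under those assumptions:

static const __le32 *radeon_ucode_payload(const struct firmware *fw)
{
	const struct common_firmware_header *hdr =
		(const struct common_firmware_header *)fw->data;

	return (const __le32 *)(fw->data +
				le32_to_cpu(hdr->ucode_array_offset_bytes));
}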
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index a4ad270e8261..6bf55ec85b62 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -117,7 +117,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -674,7 +674,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
@@ -720,7 +720,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
int r, i;
r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index aa21c31a846c..f9b70a43aa52 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
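
The radeon_vm.c diff below is the core of the series: the per-VM list of VA ranges becomes an interval tree keyed in GPU pages, turning the O(n) overlap walk in radeon_vm_bo_set_addr() into a single lookup. A sketch of the conflict check it enables, using the kernel interval_tree API (helper name is hypothetical):

#include <linux/interval_tree.h>

/* offsets are in GPU pages; the tree stores inclusive [it.start, it.last] */
static bool radeon_va_range_free(struct rb_root *va,
				 unsigned long soffset,
				 unsigned long eoffset)
{
	/* returns any node overlapping [soffset, eoffset - 1],
	 * or NULL when the range is unused */
	return interval_tree_iter_first(va, soffset, eoffset - 1) == NULL;
}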
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 725d3669014f..ccae4d9dc3de 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -238,8 +238,8 @@ void radeon_vm_flush(struct radeon_device *rdev,
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
/* if we can't remember our last VM flush then flush now! */
- /* XXX figure out why we have to flush all the time */
- if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+ if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
+ trace_radeon_vm_flush(pd_addr, ring, vm->id);
vm->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, ring, vm);
}
@@ -325,17 +325,15 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
}
bo_va->vm = vm;
bo_va->bo = bo;
- bo_va->soffset = 0;
- bo_va->eoffset = 0;
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
bo_va->flags = 0;
- bo_va->valid = false;
+ bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
- INIT_LIST_HEAD(&bo_va->vm_list);
INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
- list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);
@@ -343,6 +341,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
}
/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+ radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+ } else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+ radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+
+ } else {
+ radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
* radeon_vm_clear_bo - initially clear the page dir/table
*
* @rdev: radeon_device pointer
@@ -376,14 +410,15 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;
- r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
- NULL, entries * 2 + 64);
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
goto error;
ib.length_dw = 0;
- radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > 64);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r)
@@ -419,11 +454,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
- uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
- struct radeon_bo_va *tmp;
- struct list_head *head;
unsigned last_pfn, pt_idx;
+ uint64_t eoffset;
int r;
if (soffset) {
@@ -445,51 +478,49 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_lock(&vm->mutex);
- head = &vm->va;
- last_offset = 0;
- list_for_each_entry(tmp, &vm->va, vm_list) {
- if (bo_va == tmp) {
- /* skip over currently modified bo */
- continue;
+ if (bo_va->it.start || bo_va->it.last) {
+ if (bo_va->addr) {
+ /* add a clone of the bo_va to clear the old address */
+ struct radeon_bo_va *tmp;
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ tmp->it.start = bo_va->it.start;
+ tmp->it.last = bo_va->it.last;
+ tmp->vm = vm;
+ tmp->addr = bo_va->addr;
+ tmp->bo = radeon_bo_ref(bo_va->bo);
+ list_add(&tmp->vm_status, &vm->freed);
}
- if (soffset >= last_offset && eoffset <= tmp->soffset) {
- /* bo can be added before this one */
- break;
- }
- if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
- /* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
- bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
- (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
- mutex_unlock(&vm->mutex);
- return -EINVAL;
- }
- last_offset = tmp->eoffset;
- head = &tmp->vm_list;
+ interval_tree_remove(&bo_va->it, &vm->va);
+ bo_va->it.start = 0;
+ bo_va->it.last = 0;
}
- if (bo_va->soffset) {
- /* add a clone of the bo_va to clear the old address */
- tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
- if (!tmp) {
+ soffset /= RADEON_GPU_PAGE_SIZE;
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ if (it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
- return -ENOMEM;
+ return -EINVAL;
}
- tmp->soffset = bo_va->soffset;
- tmp->eoffset = bo_va->eoffset;
- tmp->vm = vm;
- list_add(&tmp->vm_status, &vm->freed);
+ bo_va->it.start = soffset;
+ bo_va->it.last = eoffset - 1;
+ interval_tree_insert(&bo_va->it, &vm->va);
}
- bo_va->soffset = soffset;
- bo_va->eoffset = eoffset;
bo_va->flags = flags;
- bo_va->valid = false;
- list_move(&bo_va->vm_list, head);
+ bo_va->addr = 0;
- soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
- eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
+ soffset >>= radeon_vm_block_size;
+ eoffset >>= radeon_vm_block_size;
BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
@@ -510,7 +541,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
if (r)
return r;
@@ -611,7 +642,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
ndw = 64;
/* assume the worst case */
- ndw += vm->max_pde_used * 16;
+ ndw += vm->max_pde_used * 6;
/* update too big for an IB */
if (ndw > 0xfffff)
@@ -640,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
((last_pt + incr * count) != pt)) {
if (count) {
- radeon_asic_vm_set_page(rdev, &ib, last_pde,
- last_pt, count, incr,
- R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde,
+ last_pt, count, incr,
+ R600_PTE_VALID);
}
count = 1;
@@ -654,12 +685,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
}
if (count)
- radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
- incr, R600_PTE_VALID);
+ radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
+ incr, R600_PTE_VALID);
if (ib.length_dw != 0) {
+ radeon_asic_vm_pad_ib(rdev, &ib);
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+ WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@@ -725,30 +758,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
(frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
return;
}
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
addr += RADEON_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
- RADEON_GPU_PAGE_SIZE, flags | frag_flags);
+ radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += RADEON_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
- RADEON_GPU_PAGE_SIZE, flags);
+ radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
+ RADEON_GPU_PAGE_SIZE, flags);
}
}
@@ -777,9 +810,6 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
unsigned count = 0;
uint64_t addr;
- start = start / RADEON_GPU_PAGE_SIZE;
- end = end / RADEON_GPU_PAGE_SIZE;
-
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> radeon_vm_block_size;
@@ -842,55 +872,73 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- unsigned nptes, ndw;
+ unsigned nptes, ncmds, ndw;
uint64_t addr;
+ uint32_t flags;
int r;
-
- if (!bo_va->soffset) {
+ if (!bo_va->it.start) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo_va->bo, vm);
return -EINVAL;
}
- if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
- return 0;
+ list_del_init(&bo_va->vm_status);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
- bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
+ bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
+
} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
- bo_va->valid = false;
}
+ if (addr == bo_va->addr)
+ return 0;
+ bo_va->addr = addr;
+
trace_radeon_vm_bo_update(bo_va);
- nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
+ nptes = bo_va->it.last - bo_va->it.start + 1;
+
+ /* reserve space for one command every (1 << BLOCK_SIZE) entries
+ * or 2k dwords (whatever is smaller) */
+ ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
/* padding, etc. */
ndw = 64;
- if (radeon_vm_block_size > 11)
- /* reserve space for one header for every 2k dwords */
- ndw += (nptes >> 11) * 4;
- else
- /* reserve space for one header for
- every (1 << BLOCK_SIZE) entries */
- ndw += (nptes >> radeon_vm_block_size) * 4;
+ flags = radeon_vm_page_flags(bo_va->flags);
+ if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+ /* only copy commands needed */
+ ndw += ncmds * 7;
- /* reserve space for pte addresses */
- ndw += nptes * 2;
+ } else if (flags & R600_PTE_SYSTEM) {
+ /* header for write data commands */
+ ndw += ncmds * 4;
+
+ /* body of write data command */
+ ndw += nptes * 2;
+
+ } else {
+ /* set page commands needed */
+ ndw += ncmds * 10;
+
+ /* two extra commands for begin/end of fragment */
+ ndw += 2 * 10;
+ }
/* update too big for an IB */
if (ndw > 0xfffff)
@@ -901,8 +949,12 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return r;
ib.length_dw = 0;
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
- addr, radeon_vm_page_flags(bo_va->flags));
+ radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+ bo_va->it.last + 1, addr,
+ radeon_vm_page_flags(bo_va->flags));
+
+ radeon_asic_vm_pad_ib(rdev, &ib);
+ WARN_ON(ib.length_dw > ndw);
radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -936,8 +988,8 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
int r;
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
- list_del(&bo_va->vm_status);
r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
if (r)
return r;
@@ -947,6 +999,31 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
}
/**
+ * radeon_vm_clear_invalids - clear invalidated BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all invalidated BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
* @rdev: radeon_device pointer
@@ -964,10 +1041,11 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
list_del(&bo_va->bo_list);
mutex_lock(&vm->mutex);
- list_del(&bo_va->vm_list);
+ interval_tree_remove(&bo_va->it, &vm->va);
+ list_del(&bo_va->vm_status);
- if (bo_va->soffset) {
- bo_va->bo = NULL;
+ if (bo_va->addr) {
+ bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
kfree(bo_va);
@@ -991,7 +1069,12 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
list_for_each_entry(bo_va, &bo->va, bo_list) {
- bo_va->valid = false;
+ if (bo_va->addr) {
+ mutex_lock(&bo_va->vm->mutex);
+ list_del(&bo_va->vm_status);
+ list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
+ mutex_unlock(&bo_va->vm->mutex);
+ }
}
}
@@ -1016,7 +1099,8 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
- INIT_LIST_HEAD(&vm->va);
+ vm->va = RB_ROOT;
+ INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
@@ -1031,7 +1115,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
}
r = radeon_bo_create(rdev, pd_size, align, true,
- RADEON_GEM_DOMAIN_VRAM, NULL,
+ RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&vm->page_directory);
if (r)
return r;
@@ -1060,11 +1144,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;
- if (!list_empty(&vm->va)) {
+ if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
- list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
- list_del_init(&bo_va->vm_list);
+ rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
+ interval_tree_remove(&bo_va->it, &vm->va);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
@@ -1072,8 +1156,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
- list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
+ }
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index a0f96decece3..6c1fc339d228 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -109,7 +109,6 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;
- radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -209,17 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev)
radeon_gart_table_ram_free(rdev);
}
+#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
- ((upper_32_bits(addr) & 0xff) << 4) |
- RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
+ ((upper_32_bits(addr) & 0xff) << 4);
+ if (flags & RADEON_GART_PAGE_READ)
+ entry |= RS400_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ entry |= RS400_PTE_WRITEABLE;
+ if (!(flags & RADEON_GART_PAGE_SNOOP))
+ entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
gtt[i] = entry;
}
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d1a35cb1c91d..5f6db4629aaa 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -555,7 +555,6 @@ static int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
@@ -626,15 +625,21 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t addr, uint32_t flags)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
addr = addr & 0xFFFFFFFFFFFFF000ULL;
- if (addr == rdev->dummy_page.addr)
- addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
- else
- addr |= R600_PTE_GART;
+ addr |= R600_PTE_SYSTEM;
+ if (flags & RADEON_GART_PAGE_VALID)
+ addr |= R600_PTE_VALID;
+ if (flags & RADEON_GART_PAGE_READ)
+ addr |= R600_PTE_READABLE;
+ if (flags & RADEON_GART_PAGE_WRITE)
+ addr |= R600_PTE_WRITEABLE;
+ if (flags & RADEON_GART_PAGE_SNOOP)
+ addr |= R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index da8703d8d455..2983f17ea1b3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -900,7 +900,6 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 9e854fd016da..011779bd2b3d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -42,6 +42,14 @@ MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");
+
+MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
+MODULE_FIRMWARE("radeon/tahiti_me.bin");
+MODULE_FIRMWARE("radeon/tahiti_ce.bin");
+MODULE_FIRMWARE("radeon/tahiti_mc.bin");
+MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
+MODULE_FIRMWARE("radeon/tahiti_smc.bin");
+
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
@@ -49,6 +57,14 @@ MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");
+
+MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
+MODULE_FIRMWARE("radeon/pitcairn_me.bin");
+MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
+MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
+MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
+
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
@@ -56,6 +72,14 @@ MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");
+
+MODULE_FIRMWARE("radeon/verde_pfp.bin");
+MODULE_FIRMWARE("radeon/verde_me.bin");
+MODULE_FIRMWARE("radeon/verde_ce.bin");
+MODULE_FIRMWARE("radeon/verde_mc.bin");
+MODULE_FIRMWARE("radeon/verde_rlc.bin");
+MODULE_FIRMWARE("radeon/verde_smc.bin");
+
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
@@ -63,6 +87,14 @@ MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");
+
+MODULE_FIRMWARE("radeon/oland_pfp.bin");
+MODULE_FIRMWARE("radeon/oland_me.bin");
+MODULE_FIRMWARE("radeon/oland_ce.bin");
+MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/oland_rlc.bin");
+MODULE_FIRMWARE("radeon/oland_smc.bin");
+
MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
@@ -71,6 +103,13 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
+MODULE_FIRMWARE("radeon/hainan_pfp.bin");
+MODULE_FIRMWARE("radeon/hainan_me.bin");
+MODULE_FIRMWARE("radeon/hainan_ce.bin");
+MODULE_FIRMWARE("radeon/hainan_mc.bin");
+MODULE_FIRMWARE("radeon/hainan_rlc.bin");
+MODULE_FIRMWARE("radeon/hainan_smc.bin");
+
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
@@ -1470,38 +1509,54 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
/* ucode loading */
int si_mc_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
+ const __be32 *fw_data = NULL;
+ const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
- u32 *io_mc_regs;
+ u32 *io_mc_regs = NULL;
+ const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;
if (!rdev->mc_fw)
return -EINVAL;
- ucode_size = rdev->mc_fw->size / 4;
+ if (rdev->new_fw) {
+ const struct mc_firmware_header_v1_0 *hdr =
+ (const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;
+
+ radeon_ucode_print_mc_hdr(&hdr->header);
+ regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
+ new_io_mc_regs = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ new_fw_data = (const __le32 *)
+ (rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ ucode_size = rdev->mc_fw->size / 4;
- switch (rdev->family) {
- case CHIP_TAHITI:
- io_mc_regs = (u32 *)&tahiti_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_PITCAIRN:
- io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_VERDE:
- default:
- io_mc_regs = (u32 *)&verde_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_OLAND:
- io_mc_regs = (u32 *)&oland_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
- case CHIP_HAINAN:
- io_mc_regs = (u32 *)&hainan_io_mc_regs;
- regs_size = TAHITI_IO_MC_REGS_SIZE;
- break;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ io_mc_regs = (u32 *)&tahiti_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_VERDE:
+ default:
+ io_mc_regs = (u32 *)&verde_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_OLAND:
+ io_mc_regs = (u32 *)&oland_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_HAINAN:
+ io_mc_regs = (u32 *)&hainan_io_mc_regs;
+ regs_size = TAHITI_IO_MC_REGS_SIZE;
+ break;
+ }
+ fw_data = (const __be32 *)rdev->mc_fw->data;
}
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
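
[Editor's note: in the new_fw branch everything is located through offsets carried in the firmware's own little-endian header instead of compile-time tables. A sketch of that kind of walk over a raw blob; the field offsets below are hypothetical stand-ins, not the layout from radeon_ucode.h:

    #include <stdint.h>

    static uint32_t le32(const uint8_t *p)
    {
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    /* hypothetical field offsets -- the real layout lives in radeon_ucode.h */
    enum {
        HDR_UCODE_SIZE    = 0x0c,
        HDR_UCODE_OFFSET  = 0x10,
        HDR_IO_DBG_SIZE   = 0x14,
        HDR_IO_DBG_OFFSET = 0x18,
    };

    struct mc_fw_view {
        const uint8_t *io_regs;      /* MC_SEQ_IO_DEBUG index/data pairs */
        unsigned int   regs_count;   /* number of pairs */
        const uint8_t *ucode;
        unsigned int   ucode_dwords;
    };

    static void parse_mc_fw(const uint8_t *blob, struct mc_fw_view *v)
    {
        v->regs_count   = le32(blob + HDR_IO_DBG_SIZE) / (4 * 2);
        v->io_regs      = blob + le32(blob + HDR_IO_DBG_OFFSET);
        v->ucode_dwords = le32(blob + HDR_UCODE_SIZE) / 4;
        v->ucode        = blob + le32(blob + HDR_UCODE_OFFSET);
    }
]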
@@ -1518,13 +1573,21 @@ int si_mc_load_microcode(struct radeon_device *rdev)
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
- WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
- WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ if (rdev->new_fw) {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+ WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+ } else {
+ WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+ WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+ }
}
/* load the MC ucode */
- fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < ucode_size; i++)
- WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ for (i = 0; i < ucode_size; i++) {
+ if (rdev->new_fw)
+ WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+ else
+ WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+ }
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@@ -1553,18 +1616,19 @@ int si_mc_load_microcode(struct radeon_device *rdev)
static int si_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
- const char *rlc_chip_name;
+ const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
+ int new_fw = 0;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
- rlc_chip_name = "TAHITI";
+ new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1575,7 +1639,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
- rlc_chip_name = "PITCAIRN";
+ new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1586,7 +1650,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
- rlc_chip_name = "VERDE";
+ new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1597,7 +1661,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
- rlc_chip_name = "OLAND";
+ new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1607,7 +1671,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
- rlc_chip_name = "HAINAN";
+ new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@@ -1618,86 +1682,178 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}
- DRM_INFO("Loading %s Microcode\n", chip_name);
+ DRM_INFO("Loading %s Microcode\n", new_chip_name);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->pfp_fw->size != pfp_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->pfp_fw->size, fw_name);
- err = -EINVAL;
- goto out;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+ err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->pfp_fw->size != pfp_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->pfp_fw->size, fw_name);
+ err = -EINVAL;
+ goto out;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->pfp_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->me_fw->size != me_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->me_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+ err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->me_fw->size != me_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->me_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->me_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->ce_fw->size != ce_req_size) {
- printk(KERN_ERR
- "si_cp: Bogus length %zu in firmware \"%s\"\n",
- rdev->ce_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
+ err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->ce_fw->size != ce_req_size) {
+ printk(KERN_ERR
+ "si_cp: Bogus length %zu in firmware \"%s\"\n",
+ rdev->ce_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->ce_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->rlc_fw->size != rlc_req_size) {
- printk(KERN_ERR
- "si_rlc: Bogus length %zu in firmware \"%s\"\n",
- rdev->rlc_fw->size, fw_name);
- err = -EINVAL;
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
+ err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ if (rdev->rlc_fw->size != rlc_req_size) {
+ printk(KERN_ERR
+ "si_rlc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->rlc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->rlc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
- if (err)
+ if (err) {
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
+ if (err)
+ goto out;
+ }
+ if ((rdev->mc_fw->size != mc_req_size) &&
+ (rdev->mc_fw->size != mc2_req_size)) {
+ printk(KERN_ERR
+ "si_mc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->mc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
+ } else {
+ err = radeon_ucode_validate(rdev->mc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
goto out;
+ } else {
+ new_fw++;
+ }
}
- if ((rdev->mc_fw->size != mc_req_size) &&
- (rdev->mc_fw->size != mc2_req_size)) {
- printk(KERN_ERR
- "si_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
- err = -EINVAL;
- }
- DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
- printk(KERN_ERR
- "smc: error loading firmware \"%s\"\n",
- fw_name);
- release_firmware(rdev->smc_fw);
- rdev->smc_fw = NULL;
- err = 0;
- } else if (rdev->smc_fw->size != smc_req_size) {
- printk(KERN_ERR
- "si_smc: Bogus length %zu in firmware \"%s\"\n",
- rdev->smc_fw->size, fw_name);
- err = -EINVAL;
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ err = 0;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "si_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
+ } else {
+ err = radeon_ucode_validate(rdev->smc_fw);
+ if (err) {
+ printk(KERN_ERR
+ "si_cp: validation failed for firmware \"%s\"\n",
+ fw_name);
+ goto out;
+ } else {
+ new_fw++;
+ }
}
+ if (new_fw == 0) {
+ rdev->new_fw = false;
+ } else if (new_fw < 6) {
+ printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
+ err = -EINVAL;
+ } else {
+ rdev->new_fw = true;
+ }
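
[Editor's note: each blob above follows the same pattern: try the new lowercase name, validate its header, and only on failure fall back to the legacy uppercase name plus a raw size check; the new_fw counter then enforces all-or-nothing. A compact sketch of that control flow with stub loaders (request_fw/validate_fw are stand-ins, not kernel APIs):

    #include <stdio.h>

    /* stubs standing in for request_firmware() and radeon_ucode_validate() */
    static int request_fw(const char *name) { (void)name; return 0; }
    static int validate_fw(const char *name) { (void)name; return 0; }

    /* returns 1 if the new-format blob loaded, 0 for a legacy blob, -1 on error */
    static int load_one(const char *new_name, const char *old_name)
    {
        char path[30];

        snprintf(path, sizeof(path), "radeon/%s.bin", new_name);
        if (request_fw(path) == 0)
            return validate_fw(path) == 0 ? 1 : -1;

        snprintf(path, sizeof(path), "radeon/%s.bin", old_name);
        if (request_fw(path))
            return -1;
        return 0; /* legacy blob; the caller still checks its raw size */
    }

    int main(void)
    {
        int new_fw = 0, i, r;
        const char *new_names[] = { "tahiti_pfp", "tahiti_me", "tahiti_ce",
                                    "tahiti_rlc", "tahiti_mc", "tahiti_smc" };
        const char *old_names[] = { "TAHITI_pfp", "TAHITI_me", "TAHITI_ce",
                                    "TAHITI_rlc", "TAHITI_mc2", "TAHITI_smc" };

        for (i = 0; i < 6; i++) {
            r = load_one(new_names[i], old_names[i]);
            if (r < 0)
                return 1;
            new_fw += r;
        }
        /* all-or-nothing: mixing header-less and headered blobs is rejected */
        if (new_fw != 0 && new_fw < 6)
            fprintf(stderr, "mixing new and old firmware!\n");
        return 0;
    }
]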
out:
if (err) {
if (err != -EINVAL)
@@ -3282,34 +3438,77 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
static int si_cp_load_microcode(struct radeon_device *rdev)
{
- const __be32 *fw_data;
int i;
- if (!rdev->me_fw || !rdev->pfp_fw)
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
return -EINVAL;
si_cp_enable(rdev, false);
- /* PFP */
- fw_data = (const __be32 *)rdev->pfp_fw->data;
- WREG32(CP_PFP_UCODE_ADDR, 0);
- for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
- WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_PFP_UCODE_ADDR, 0);
-
- /* CE */
- fw_data = (const __be32 *)rdev->ce_fw->data;
- WREG32(CP_CE_UCODE_ADDR, 0);
- for (i = 0; i < SI_CE_UCODE_SIZE; i++)
- WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_CE_UCODE_ADDR, 0);
-
- /* ME */
- fw_data = (const __be32 *)rdev->me_fw->data;
- WREG32(CP_ME_RAM_WADDR, 0);
- for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
- WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
- WREG32(CP_ME_RAM_WADDR, 0);
+ if (rdev->new_fw) {
+ const struct gfx_firmware_header_v1_0 *pfp_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
+ const struct gfx_firmware_header_v1_0 *ce_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
+ const struct gfx_firmware_header_v1_0 *me_hdr =
+ (const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
+ const __le32 *fw_data;
+ u32 fw_size;
+
+ radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
+ radeon_ucode_print_gfx_hdr(&ce_hdr->header);
+ radeon_ucode_print_gfx_hdr(&me_hdr->header);
+
+ /* PFP */
+ fw_data = (const __le32 *)
+ (rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __le32 *)
+ (rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __le32 *)
+ (rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
+ fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < fw_size; i++)
+ WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ } else {
+ const __be32 *fw_data;
+
+ /* PFP */
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ /* CE */
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ WREG32(CP_CE_UCODE_ADDR, 0);
+ for (i = 0; i < SI_CE_UCODE_SIZE; i++)
+ WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_CE_UCODE_ADDR, 0);
+
+ /* ME */
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_ME_RAM_WADDR, 0);
+ }
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@@ -4048,7 +4247,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
- radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@@ -4815,7 +5013,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+ radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));
if (vm->id < 8) {
@@ -5592,7 +5790,6 @@ static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
- const __be32 *fw_data;
if (!rdev->rlc_fw)
return -EINVAL;
@@ -5615,10 +5812,26 @@ static int si_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
- fw_data = (const __be32 *)rdev->rlc_fw->data;
- for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
- WREG32(RLC_UCODE_ADDR, i);
- WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ if (rdev->new_fw) {
+ const struct rlc_firmware_header_v1_0 *hdr =
+ (const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
+ u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+ const __le32 *fw_data = (const __le32 *)
+ (rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ radeon_ucode_print_rlc_hdr(&hdr->header);
+
+ for (i = 0; i < fw_size; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
+ }
+ } else {
+ const __be32 *fw_data =
+ (const __be32 *)rdev->rlc_fw->data;
+ for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
}
WREG32(RLC_UCODE_ADDR, 0);
@@ -6318,7 +6531,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
- radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+ if (radeon_use_pflipirq > 0)
+ radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index e24c94b6d14d..716505129450 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -56,7 +56,41 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}
/**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (SI).
+ */
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count)
+{
+ while (count) {
+ unsigned bytes = count * 8;
+ if (bytes > 0xFFFF8)
+ bytes = 0xFFFF8;
+
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+ 1, 0, 0, bytes);
+ ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+ ib->ptr[ib->length_dw++] = lower_32_bits(src);
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+ pe += bytes;
+ src += bytes;
+ count -= bytes / 8;
+ }
+}
+
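
[Editor's note: a COPY packet moves at most 0xFFFF8 bytes, so large updates are emitted as a series of five-dword packets. A standalone check of the dword count that loop produces:

    #include <stdio.h>

    #define MAX_COPY_BYTES 0xFFFF8u /* per-packet limit from the hunk */

    /* returns the number of dwords the copy loop would emit */
    static unsigned int copy_pages_ndw(unsigned int count /* PTEs */)
    {
        unsigned int ndw = 0;

        while (count) {
            unsigned int bytes = count * 8;
            if (bytes > MAX_COPY_BYTES)
                bytes = MAX_COPY_BYTES;
            ndw += 5;               /* header + 2x dst addr + 2x src addr */
            count -= bytes / 8;
        }
        return ndw;
    }

    int main(void)
    {
        printf("%u\n", copy_pages_ndw(1 << 20)); /* 1M PTEs -> 9 packets, 45 dw */
        return 0;
    }
]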
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@@ -66,83 +100,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @incr: increase next addr by incr bytes
* @flags: access flags
*
- * Update the page tables using the DMA (SI).
+ * Update PTEs by writing them manually using the DMA (SI).
*/
-void si_dma_vm_set_page(struct radeon_device *rdev,
- struct radeon_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags)
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;
- trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
- if (flags == R600_PTE_GART) {
- uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
- while (count) {
- unsigned bytes = count * 8;
- if (bytes > 0xFFFF8)
- bytes = 0xFFFF8;
-
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
- 1, 0, 0, bytes);
- ib->ptr[ib->length_dw++] = lower_32_bits(pe);
- ib->ptr[ib->length_dw++] = lower_32_bits(src);
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
-
- pe += bytes;
- src += bytes;
- count -= bytes / 8;
- }
- } else if (flags & R600_PTE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
- addr += incr;
- value |= flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & R600_PTE_VALID)
+ } else if (flags & R600_PTE_VALID) {
value = addr;
- else
+ } else {
value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
+ }
+ addr += incr;
+ value |= flags;
+ ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
}
}
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint64_t value;
+ unsigned ndw;
+
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & R600_PTE_VALID)
+ value = addr;
+ else
+ value = 0;
+
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
}
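
[Editor's note: unlike the write path, si_dma_vm_set_pages emits one nine-dword PTE_PDE packet per chunk and lets the DMA engine expand it into up to ndw/2 entries on its own. A hedged model of the per-entry value (an assumption for illustration, not a statement of the exact hardware rule):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Plausible model of what one PTE_PDE packet makes the engine write for
     * entry i: the packet carries a base value, a flag mask and an increment,
     * and the engine steps the value by incr per generated entry.
     */
    static uint64_t pte_pde_entry(uint64_t value, uint32_t flags,
                                  uint32_t incr, unsigned int i)
    {
        return (value + (uint64_t)i * incr) | flags;
    }

    int main(void)
    {
        /* first three PTEs of a 4K-stride VRAM mapping starting at 1 MiB */
        unsigned int i;
        for (i = 0; i < 3; i++)
            printf("0x%llx\n",
                   (unsigned long long)pte_pde_entry(0x100000, 0x1, 0x1000, i));
        return 0;
    }
]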
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 58918868f894..70e61ffeace2 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3812,6 +3812,27 @@ void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
voltage_table->count = max_voltage_steps;
}
+static int si_get_svi2_voltage_table(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
static int si_construct_voltage_tables(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -3819,15 +3840,25 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
struct si_power_info *si_pi = si_get_pi(rdev);
int ret;
- ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
- if (ret)
- return ret;
+ if (pi->voltage_control) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
- if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev,
- SISLANDS_MAX_NO_VREG_STEPS,
- &eg_pi->vddc_voltage_table);
+ if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
+ } else if (si_pi->voltage_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &eg_pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
if (eg_pi->vddci_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3840,6 +3871,13 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
SISLANDS_MAX_NO_VREG_STEPS,
&eg_pi->vddci_voltage_table);
}
+ if (si_pi->vddci_control_svi2) {
+ ret = si_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &eg_pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
if (pi->mvdd_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
@@ -3893,46 +3931,55 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
struct si_power_info *si_pi = si_get_pi(rdev);
u8 i;
- if (eg_pi->vddc_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
-
- for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
- if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
- table->maxVDDCIndexInPPTable = i;
- break;
+ if (si_pi->voltage_control_svi2) {
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+ si_pi->svc_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+ si_pi->svd_gpio_id);
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+ 2);
+ } else {
+ if (eg_pi->vddc_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+
+ for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+ if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+ table->maxVDDCIndexInPPTable = i;
+ break;
+ }
}
}
- }
- if (eg_pi->vddci_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
+ if (eg_pi->vddci_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
- cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+ cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+ }
- if (si_pi->mvdd_voltage_table.count) {
- si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
+ if (si_pi->mvdd_voltage_table.count) {
+ si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
- table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
- cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
- }
+ table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+ cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+ }
- if (si_pi->vddc_phase_shed_control) {
- if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
- &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
- si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
+ if (si_pi->vddc_phase_shed_control) {
+ if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+ si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
- table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
- cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+ table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+ cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
- si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
- (u32)si_pi->vddc_phase_shed_table.phase_delay);
- } else {
- si_pi->vddc_phase_shed_control = false;
+ si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+ (u32)si_pi->vddc_phase_shed_table.phase_delay);
+ } else {
+ si_pi->vddc_phase_shed_control = false;
+ }
}
}
@@ -5798,16 +5845,17 @@ int si_dpm_enable(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct si_power_info *si_pi = si_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
if (si_is_smc_running(rdev))
return -EINVAL;
- if (pi->voltage_control)
+ if (pi->voltage_control || si_pi->voltage_control_svi2)
si_enable_voltage_control(rdev, true);
if (pi->mvdd_control)
si_get_mvdd_configuration(rdev);
- if (pi->voltage_control) {
+ if (pi->voltage_control || si_pi->voltage_control_svi2) {
ret = si_construct_voltage_tables(rdev);
if (ret) {
DRM_ERROR("si_construct_voltage_tables failed\n");
@@ -6406,16 +6454,32 @@ int si_dpm_init(struct radeon_device *rdev)
ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
pi->voltage_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!pi->voltage_control) {
+ si_pi->voltage_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_SVID2);
+ if (si_pi->voltage_control_svi2)
+ radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+ }
pi->mvdd_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT);
eg_pi->vddci_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT);
+ if (!eg_pi->vddci_control)
+ si_pi->vddci_control_svi2 =
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+ VOLTAGE_OBJ_SVID2);
si_pi->vddc_phase_shed_control =
- radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
+ radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+ VOLTAGE_OBJ_PHASE_LUT);
rv770_get_engine_memory_ss(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index 4ce5032cdf49..8b5c06a0832d 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -170,6 +170,8 @@ struct si_power_info {
bool vddc_phase_shed_control;
bool pspp_notify_required;
bool sclk_deep_sleep_above_low;
+ bool voltage_control_svi2;
+ bool vddci_control_svi2;
/* smc offsets */
u32 sram_end;
u32 state_table_start;
@@ -192,6 +194,9 @@ struct si_power_info {
SMC_SIslands_MCRegisters smc_mc_reg_table;
SISLANDS_SMC_STATETABLE smc_statetable;
PP_SIslands_PAPMParameters papm_parm;
+ /* SVI2 */
+ u8 svd_gpio_id;
+ u8 svc_gpio_id;
};
#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index e80efcf0c230..73dbc79c959d 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -219,36 +219,48 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
if (!rdev->smc_fw)
return -EINVAL;
- switch (rdev->family) {
- case CHIP_TAHITI:
- ucode_start_address = TAHITI_SMC_UCODE_START;
- ucode_size = TAHITI_SMC_UCODE_SIZE;
- break;
- case CHIP_PITCAIRN:
- ucode_start_address = PITCAIRN_SMC_UCODE_START;
- ucode_size = PITCAIRN_SMC_UCODE_SIZE;
- break;
- case CHIP_VERDE:
- ucode_start_address = VERDE_SMC_UCODE_START;
- ucode_size = VERDE_SMC_UCODE_SIZE;
- break;
- case CHIP_OLAND:
- ucode_start_address = OLAND_SMC_UCODE_START;
- ucode_size = OLAND_SMC_UCODE_SIZE;
- break;
- case CHIP_HAINAN:
- ucode_start_address = HAINAN_SMC_UCODE_START;
- ucode_size = HAINAN_SMC_UCODE_SIZE;
- break;
- default:
- DRM_ERROR("unknown asic in smc ucode loader\n");
- BUG();
+ if (rdev->new_fw) {
+ const struct smc_firmware_header_v1_0 *hdr =
+ (const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+ radeon_ucode_print_smc_hdr(&hdr->header);
+
+ ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+ ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+ src = (const u8 *)
+ (rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ } else {
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ ucode_start_address = TAHITI_SMC_UCODE_START;
+ ucode_size = TAHITI_SMC_UCODE_SIZE;
+ break;
+ case CHIP_PITCAIRN:
+ ucode_start_address = PITCAIRN_SMC_UCODE_START;
+ ucode_size = PITCAIRN_SMC_UCODE_SIZE;
+ break;
+ case CHIP_VERDE:
+ ucode_start_address = VERDE_SMC_UCODE_START;
+ ucode_size = VERDE_SMC_UCODE_SIZE;
+ break;
+ case CHIP_OLAND:
+ ucode_start_address = OLAND_SMC_UCODE_START;
+ ucode_size = OLAND_SMC_UCODE_SIZE;
+ break;
+ case CHIP_HAINAN:
+ ucode_start_address = HAINAN_SMC_UCODE_START;
+ ucode_size = HAINAN_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+ src = (const u8 *)rdev->smc_fw->data;
}
if (ucode_size & 3)
return -EINVAL;
- src = (const u8 *)rdev->smc_fw->data;
spin_lock_irqsave(&rdev->smc_idx_lock, flags);
WREG32(SMC_IND_INDEX_0, ucode_start_address);
WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 10e945a49479..623a0b1e2d9d 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -241,6 +241,9 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC
#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 792fd1d20e86..fda64b7b73e8 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -187,7 +187,7 @@ static struct drm_driver rcar_du_driver = {
* Power management
*/
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index a87edfac111f..76026104d000 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -135,7 +135,9 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
struct rcar_du_device *rcdu = dev->dev_private;
const struct rcar_du_format_info *format;
+ unsigned int max_pitch;
unsigned int align;
+ unsigned int bpp;
format = rcar_du_format_info(mode_cmd->pixel_format);
if (format == NULL) {
@@ -144,13 +146,20 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
+ /*
+ * The pitch and alignment constraints are expressed in pixels on the
+ * hardware side and in bytes in the DRM API.
+ */
+ bpp = format->planes == 2 ? 1 : format->bpp / 8;
+ max_pitch = 4096 * bpp;
+
if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
align = 128;
else
- align = 16 * format->bpp / 8;
+ align = 16 * bpp;
if (mode_cmd->pitches[0] & (align - 1) ||
- mode_cmd->pitches[0] >= 8192) {
+ mode_cmd->pitches[0] >= max_pitch) {
dev_dbg(dev->dev, "invalid pitch value %u\n",
mode_cmd->pitches[0]);
return ERR_PTR(-EINVAL);
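
[Editor's note: worked example of the new check: a two-plane format (NV12-style) has bpp 1 on the DU side, so the pitch ceiling is 4096 bytes, while 32-bpp RGB gets 4 bytes per pixel and a 16384-byte ceiling. A standalone mirror of the check:

    #include <stdio.h>

    /* mirrors the pitch check: constraints are in pixels on the hardware
     * side (4096 pixels max, 16-pixel alignment) and bytes in the DRM API */
    static int pitch_ok(unsigned int pitch, unsigned int planes,
                        unsigned int format_bpp, int quirk_align_128b)
    {
        unsigned int bpp = planes == 2 ? 1 : format_bpp / 8;
        unsigned int max_pitch = 4096 * bpp;
        unsigned int align = quirk_align_128b ? 128 : 16 * bpp;

        return !(pitch & (align - 1)) && pitch < max_pitch;
    }

    int main(void)
    {
        printf("%d\n", pitch_ok(1920 * 4, 1, 32, 0)); /* XRGB8888, 1920 wide */
        printf("%d\n", pitch_ok(1920, 2, 8, 0));      /* NV12 luma plane */
        return 0;
    }
]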
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 289048d1c7b2..21426bd234eb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -64,7 +64,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -105,7 +105,7 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_sysfs_connector_add(connector);
+ ret = drm_connector_register(connector);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index ccfe64c7188f..8af3944d31b9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -32,7 +32,7 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -70,7 +70,7 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_sysfs_connector_add(connector);
+ ret = drm_connector_register(connector);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index faf176b2daf9..47875de89010 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -692,7 +692,7 @@ static void shmob_drm_connector_destroy(struct drm_connector *connector)
struct shmob_drm_connector *scon = to_shmob_connector(connector);
shmob_drm_backlight_exit(scon);
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
@@ -726,7 +726,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_sysfs_connector_add(connector);
+ ret = drm_connector_register(connector);
if (ret < 0)
goto err_cleanup;
@@ -749,7 +749,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
err_backlight:
shmob_drm_backlight_exit(&sdev->connector);
err_sysfs:
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
err_cleanup:
drm_connector_cleanup(connector);
return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 82c84c7fd4f6..ff4ba483b602 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -297,7 +297,7 @@ static struct drm_driver shmob_drm_driver = {
* Power management
*/
-#if CONFIG_PM_SLEEP
+#ifdef CONFIG_PM_SLEEP
static int shmob_drm_pm_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
new file mode 100644
index 000000000000..2d9d4252d598
--- /dev/null
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -0,0 +1,14 @@
+config DRM_STI
+ tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
+ depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+ help
+ Choose this option to enable DRM on STM stiH41x chipset
+
+config DRM_STI_FBDEV
+ bool "DRM frame buffer device for STMicroelectronics SoC stiH41x Series"
+ depends on DRM_STI
+ help
+ Choose this option to enable FBDEV on top of DRM for STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
new file mode 100644
index 000000000000..04ac2ceef27f
--- /dev/null
+++ b/drivers/gpu/drm/sti/Makefile
@@ -0,0 +1,21 @@
+sticompositor-y := \
+ sti_layer.o \
+ sti_mixer.o \
+ sti_gdp.o \
+ sti_vid.o \
+ sti_compositor.o \
+ sti_drm_crtc.o \
+ sti_drm_plane.o
+
+stihdmi-y := sti_hdmi.o \
+ sti_hdmi_tx3g0c55phy.o \
+ sti_hdmi_tx3g4c28phy.o \
+
+obj-$(CONFIG_DRM_STI) = \
+ sti_vtg.o \
+ sti_vtac.o \
+ stihdmi.o \
+ sti_hda.o \
+ sti_tvout.o \
+ sticompositor.o \
+ sti_drm_drv.o
\ No newline at end of file
diff --git a/drivers/gpu/drm/sti/NOTES b/drivers/gpu/drm/sti/NOTES
new file mode 100644
index 000000000000..57e257969198
--- /dev/null
+++ b/drivers/gpu/drm/sti/NOTES
@@ -0,0 +1,58 @@
+1. stiH display hardware IP
+---------------------------
+The STMicroelectronics stiH SoCs use a common chain of HW display IP blocks:
+- The High Quality Video Display Processor (HQVDP) gets video frames from a
+ video decoder and does high quality video processing, including scaling.
+
+- The Compositor is a multiplane, dual-mixer (Main & Aux) digital processor. It
+ has several inputs:
+ - The graphics planes are internally processed by the Generic Display
+ Pipeline (GDP).
+ - The video plug (VID) connects to the HQVDP output.
+ - The cursor handles ... a cursor.
+- The TV OUT pre-formats (convert, clip, round) the compositor output data
+- The HDMI / DVO / HD Analog / SD analog IP builds the video signals
+ - DVO (Digital Video Output) handles a 24-bit parallel signal
+ - The HD analog signal is typically driven by a YCbCr cable, supporting up to
+ 1080i mode.
+ - The SD analog signal is typically used for legacy TV
+- The VTG (Video Timing Generators) build Vsync signals used by the other HW IP
+Note that some stiH drivers support only a subset of these HW IP blocks.
+
+ .-------------. .-----------. .-----------.
+GPU >-------------+GDP Main | | +---+ HDMI +--> HDMI
+GPU >-------------+GDP mixer+---+ | :===========:
+GPU >-------------+Cursor | | +---+ DVO +--> 24b//
+ ------- | COMPOSITOR | | TV OUT | :===========:
+ | | | | | +---+ HD analog +--> YCbCr
+Vid >--+ HQVDP +--+VID Aux +---+ | :===========:
+dec | | | mixer| | +---+ SD analog +--> CVBS
+ '-------' '-------------' '-----------' '-----------'
+ .-----------.
+ | main+--> Vsync
+ | VTG |
+ | aux+--> Vsync
+ '-----------'
+
+2. DRM / HW mapping
+-------------------
+These IP blocks are mapped to DRM objects as follows:
+- The CRTCs are mapped to the Compositor Main and Aux Mixers
+- The Framebuffers and planes are mapped to the Compositor GDP (non video
+ buffers) and to HQVDP+VID (video buffers)
+- The Cursor is mapped to the Compositor Cursor
+- The Encoders are mapped to the TVOut
+- The Bridges/Connectors are mapped to the HDMI / DVO / HD Analog / SD analog
+
+FB & planes Cursor CRTC Encoders Bridges/Connectors
+ | | | | |
+ | | | | |
+ | .-------------. | .-----------. .-----------. |
+ +------------> |GDP | Main | | | +-> | | HDMI | <-+
+ +------------> |GDP v mixer|<+ | | | :===========: |
+ | |Cursor | | | +-> | | DVO | <-+
+ | ------- | COMPOSITOR | | |TV OUT | | :===========: |
+ | | | | | | | +-> | | HD analog | <-+
+ +-> | HQVDP | |VID Aux |<+ | | | :===========: |
+ | | | mixer| | +-> | | SD analog | <-+
+ '-------' '-------------' '-----------' '-----------'
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
new file mode 100644
index 000000000000..390d93e9a06c
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+
+#include "sti_compositor.h"
+#include "sti_drm_crtc.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_plane.h"
+#include "sti_gdp.h"
+#include "sti_vtg.h"
+
+/*
+ * stiH407 compositor properties
+ */
+struct sti_compositor_data stih407_compositor_data = {
+ .nb_subdev = 6,
+ .subdev_desc = {
+ {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
+ {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
+ {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
+ {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
+ {STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ },
+};
+
+/*
+ * stiH416 compositor properties
+ * Note:
+ * on stih416 MIXER_AUX has a different base address from MIXER_MAIN
+ * Moreover, GDPx is different for Main and Aux Mixer. So this subdev map does
+ * not fit for stiH416 if we want to enable the MIXER_AUX.
+ */
+struct sti_compositor_data stih416_compositor_data = {
+ .nb_subdev = 3,
+ .subdev_desc = {
+ {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
+ {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ },
+};
+
+static int sti_compositor_init_subdev(struct sti_compositor *compo,
+ struct sti_compositor_subdev_descriptor *desc,
+ unsigned int array_size)
+{
+ unsigned int i, mixer_id = 0, layer_id = 0;
+
+ for (i = 0; i < array_size; i++) {
+ switch (desc[i].type) {
+ case STI_MIXER_MAIN_SUBDEV:
+ case STI_MIXER_AUX_SUBDEV:
+ compo->mixer[mixer_id++] =
+ sti_mixer_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
+ case STI_GPD_SUBDEV:
+ case STI_VID_SUBDEV:
+ compo->layer[layer_id++] =
+ sti_layer_create(compo->dev, desc[i].id,
+ compo->regs + desc[i].offset);
+ break;
+ /* case STI_CURSOR_SUBDEV : TODO */
+ default:
+ DRM_ERROR("Unknown subdev component type\n");
+ return 1;
+ }
+
+ }
+ compo->nb_mixers = mixer_id;
+ compo->nb_layers = layer_id;
+
+ return 0;
+}
+
+static int sti_compositor_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i, crtc = 0, plane = 0;
+ struct sti_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_plane *cursor = NULL;
+ struct drm_plane *primary = NULL;
+
+ dev_priv->compo = compo;
+
+ for (i = 0; i < compo->nb_layers; i++) {
+ if (compo->layer[i]) {
+ enum sti_layer_desc desc = compo->layer[i]->desc;
+ enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
+ enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
+
+ if (compo->mixer[crtc])
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+
+ switch (type) {
+ case STI_CUR:
+ cursor = sti_drm_plane_init(drm_dev,
+ compo->layer[i],
+ (1 << crtc) - 1,
+ DRM_PLANE_TYPE_CURSOR);
+ break;
+ case STI_GDP:
+ case STI_VID:
+ primary = sti_drm_plane_init(drm_dev,
+ compo->layer[i],
+ (1 << crtc) - 1, plane_type);
+ plane++;
+ break;
+ case STI_BCK:
+ break;
+ }
+
+ /* The first planes are reserved for primary planes */
+ if (compo->mixer[crtc]) {
+ sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
+ primary, cursor);
+ crtc++;
+ cursor = NULL;
+ }
+ }
+ }
+
+ drm_vblank_init(drm_dev, crtc);
+ /* Allow usage of vblank without having to call drm_irq_install */
+ drm_dev->irq_enabled = 1;
+
+ DRM_DEBUG_DRIVER("Initialized %d DRM CRTC(s) and %d DRM plane(s)\n",
+ crtc, plane);
+ DRM_DEBUG_DRIVER("DRM plane(s) for VID/VDP not created yet\n");
+
+ return 0;
+}
+
+static void sti_compositor_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_compositor_ops = {
+ .bind = sti_compositor_bind,
+ .unbind = sti_compositor_unbind,
+};
+
+static const struct of_device_id compositor_of_match[] = {
+ {
+ .compatible = "st,stih416-compositor",
+ .data = &stih416_compositor_data,
+ }, {
+ .compatible = "st,stih407-compositor",
+ .data = &stih407_compositor_data,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, compositor_of_match);
+
+static int sti_compositor_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *vtg_np;
+ struct sti_compositor *compo;
+ struct resource *res;
+ int err;
+
+ compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL);
+ if (!compo) {
+ DRM_ERROR("Failed to allocate compositor context\n");
+ return -ENOMEM;
+ }
+ compo->dev = dev;
+ compo->vtg_vblank_nb.notifier_call = sti_drm_crtc_vblank_cb;
+
+ /* populate data structure depending on compatibility */
+ BUG_ON(!of_match_node(compositor_of_match, np)->data);
+
+ memcpy(&compo->data, of_match_node(compositor_of_match, np)->data,
+ sizeof(struct sti_compositor_data));
+
+ /* Get memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENXIO;
+ }
+ compo->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (compo->regs == NULL) {
+ DRM_ERROR("Register mapping failed\n");
+ return -ENXIO;
+ }
+
+ /* Get clock resources */
+ compo->clk_compo_main = devm_clk_get(dev, "compo_main");
+ if (IS_ERR(compo->clk_compo_main)) {
+ DRM_ERROR("Cannot get compo_main clock\n");
+ return PTR_ERR(compo->clk_compo_main);
+ }
+
+ compo->clk_compo_aux = devm_clk_get(dev, "compo_aux");
+ if (IS_ERR(compo->clk_compo_aux)) {
+ DRM_ERROR("Cannot get compo_aux clock\n");
+ return PTR_ERR(compo->clk_compo_aux);
+ }
+
+ compo->clk_pix_main = devm_clk_get(dev, "pix_main");
+ if (IS_ERR(compo->clk_pix_main)) {
+ DRM_ERROR("Cannot get pix_main clock\n");
+ return PTR_ERR(compo->clk_pix_main);
+ }
+
+ compo->clk_pix_aux = devm_clk_get(dev, "pix_aux");
+ if (IS_ERR(compo->clk_pix_aux)) {
+ DRM_ERROR("Cannot get pix_aux clock\n");
+ return PTR_ERR(compo->clk_pix_aux);
+ }
+
+ /* Get reset resources */
+ compo->rst_main = devm_reset_control_get(dev, "compo-main");
+ /* Take compo main out of reset */
+ if (!IS_ERR(compo->rst_main))
+ reset_control_deassert(compo->rst_main);
+
+ compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
+ /* Take compo aux out of reset */
+ if (!IS_ERR(compo->rst_aux))
+ reset_control_deassert(compo->rst_aux);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
+ if (vtg_np)
+ compo->vtg_main = of_vtg_find(vtg_np);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
+ if (vtg_np)
+ compo->vtg_aux = of_vtg_find(vtg_np);
+
+ /* Initialize compositor subdevices */
+ err = sti_compositor_init_subdev(compo, compo->data.subdev_desc,
+ compo->data.nb_subdev);
+ if (err)
+ return err;
+
+ platform_set_drvdata(pdev, compo);
+
+ return component_add(&pdev->dev, &sti_compositor_ops);
+}
+
+static int sti_compositor_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_compositor_ops);
+ return 0;
+}
+
+static struct platform_driver sti_compositor_driver = {
+ .driver = {
+ .name = "sti-compositor",
+ .owner = THIS_MODULE,
+ .of_match_table = compositor_of_match,
+ },
+ .probe = sti_compositor_probe,
+ .remove = sti_compositor_remove,
+};
+
+module_platform_driver(sti_compositor_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
new file mode 100644
index 000000000000..3ea19db72e0f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_COMPOSITOR_H_
+#define _STI_COMPOSITOR_H_
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+
+#include "sti_layer.h"
+#include "sti_mixer.h"
+
+#define WAIT_NEXT_VSYNC_MS 50 /* ms */
+
+#define STI_MAX_LAYER 8
+#define STI_MAX_MIXER 2
+
+enum sti_compositor_subdev_type {
+ STI_MIXER_MAIN_SUBDEV,
+ STI_MIXER_AUX_SUBDEV,
+ STI_GPD_SUBDEV,
+ STI_VID_SUBDEV,
+ STI_CURSOR_SUBDEV,
+};
+
+struct sti_compositor_subdev_descriptor {
+ enum sti_compositor_subdev_type type;
+ int id;
+ unsigned int offset;
+};
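+
+/*
+ * Example: on stih407 the main mixer is described by
+ * { STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00 }, where 0xC00 is the
+ * subdevice register offset from the compositor base address.
+ */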
+
+/**
+ * STI Compositor data structure
+ *
+ * @nb_subdev: number of subdevices supported by the compositor
+ * @subdev_desc: subdev list description
+ */
+#define MAX_SUBDEV 9
+struct sti_compositor_data {
+ unsigned int nb_subdev;
+ struct sti_compositor_subdev_descriptor subdev_desc[MAX_SUBDEV];
+};
+
+/**
+ * STI Compositor structure
+ *
+ * @dev: driver device
+ * @regs: registers (main)
+ * @data: device data
+ * @clk_compo_main: clock for main compo
+ * @clk_compo_aux: clock for aux compo
+ * @clk_pix_main: pixel clock for main path
+ * @clk_pix_aux: pixel clock for aux path
+ * @rst_main: reset control of the main path
+ * @rst_aux: reset control of the aux path
+ * @mixer: array of mixers
+ * @vtg_main: vtg for main data path
+ * @vtg_aux: vtg for auxiliary data path
+ * @layer: array of layers
+ * @nb_mixers: number of mixers for this compositor
+ * @nb_layers: number of layers (GDP,VID,...) for this compositor
+ * @enable: true if the compositor is enabled, false otherwise
+ * @vtg_vblank_nb: callback for VTG VSYNC notification
+ */
+struct sti_compositor {
+ struct device *dev;
+ void __iomem *regs;
+ struct sti_compositor_data data;
+ struct clk *clk_compo_main;
+ struct clk *clk_compo_aux;
+ struct clk *clk_pix_main;
+ struct clk *clk_pix_aux;
+ struct reset_control *rst_main;
+ struct reset_control *rst_aux;
+ struct sti_mixer *mixer[STI_MAX_MIXER];
+ struct sti_vtg *vtg_main;
+ struct sti_vtg *vtg_aux;
+ struct sti_layer *layer[STI_MAX_LAYER];
+ int nb_mixers;
+ int nb_layers;
+ bool enable;
+ struct notifier_block vtg_vblank_nb;
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
new file mode 100644
index 000000000000..d2ae0c0e13be
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_crtc.h"
+#include "sti_vtg.h"
+
+static void sti_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ DRM_DEBUG_KMS("\n");
+}
+
+static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+
+ compo->enable = true;
+
+ /* Prepare and enable the compo IP clock */
+ if (mixer->id == STI_MIXER_MAIN) {
+ if (clk_prepare_enable(compo->clk_compo_main))
+ DRM_INFO("Failed to prepare/enable compo_main clk\n");
+ } else {
+ if (clk_prepare_enable(compo->clk_compo_aux))
+ DRM_INFO("Failed to prepare/enable compo_aux clk\n");
+ }
+}
+
+static void sti_drm_crtc_commit(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+
+ if (!mixer || !compo) {
+ DRM_ERROR("Cannot find mixer or compositor\n");
+ return;
+ }
+
+ /* get GDP which is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (layer)
+ sti_layer_commit(layer);
+ else
+ DRM_ERROR("Can not find CRTC dedicated plane (GDP0)\n");
+
+ /* Enable layer on mixer */
+ if (sti_mixer_set_layer_status(mixer, layer, true))
+ DRM_ERROR("Can not enable layer at mixer\n");
+}
+
+static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* accept the provided drm_display_mode, do not fix it up */
+ return true;
+}
+
+static int
+sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+ struct clk *clk;
+ int rate = mode->clock * 1000;
+ int res;
+ unsigned int w, h;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d mode:%d (%s)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->primary->fb->base.id, mode->base.id, mode->name);
+
+ DRM_DEBUG_KMS("%d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+ mode->vrefresh, mode->clock,
+ mode->hdisplay,
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal,
+ mode->vdisplay,
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->type, mode->flags);
+
+ /* Set rate and prepare/enable pixel clock */
+ if (mixer->id == STI_MIXER_MAIN)
+ clk = compo->clk_pix_main;
+ else
+ clk = compo->clk_pix_aux;
+
+ res = clk_set_rate(clk, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
+ return -EINVAL;
+ }
+ if (clk_prepare_enable(clk)) {
+ DRM_ERROR("Failed to prepare/enable pix clk\n");
+ return -EINVAL;
+ }
+
+ sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, &crtc->mode);
+
+ /* a GDP is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Can not find GDP0)\n");
+ return -EINVAL;
+ }
+
+ /* copy the mode data adjusted by mode_fixup() into crtc->mode
+ * so that hardware can be set to proper mode
+ */
+ memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
+
+ res = sti_mixer_set_layer_depth(mixer, layer);
+ if (res) {
+ DRM_ERROR("Can not set layer depth\n");
+ return -EINVAL;
+ }
+ res = sti_mixer_active_video_area(mixer, &crtc->mode);
+ if (res) {
+ DRM_ERROR("Can not set active video area\n");
+ return -EINVAL;
+ }
+
+ w = crtc->primary->fb->width - x;
+ h = crtc->primary->fb->height - y;
+
+ return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ mixer->id, 0, 0, w, h, x, y, w, h);
+}
+
+static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct sti_layer *layer;
+ unsigned int w, h;
+ int ret;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) fb:%d (%d,%d)\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ crtc->primary->fb->base.id, x, y);
+
+ /* GDP is reserved to the CRTC FB */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Can not find GDP0)\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ w = crtc->primary->fb->width - crtc->x;
+ h = crtc->primary->fb->height - crtc->y;
+
+ ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ mixer->id, 0, 0, w, h,
+ crtc->x, crtc->y, w, h);
+ if (ret) {
+ DRM_ERROR("Can not prepare layer\n");
+ goto out;
+ }
+
+ sti_drm_crtc_commit(crtc);
+out:
+ return ret;
+}
+
+static void sti_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+ /* do nothing */
+}
+
+static void sti_drm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ struct device *dev = mixer->dev;
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ struct sti_layer *layer;
+
+ if (!compo->enable)
+ return;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
+
+ /* Disable Background */
+ sti_mixer_set_background_status(mixer, false);
+
+ /* Disable GDP */
+ layer = to_sti_layer(crtc->primary);
+ if (!layer) {
+ DRM_ERROR("Cannot find GDP0\n");
+ return;
+ }
+
+ /* Disable layer at mixer level */
+ if (sti_mixer_set_layer_status(mixer, layer, false))
+ DRM_ERROR("Can not disable %s layer at mixer\n",
+ sti_layer_to_str(layer));
+
+ /* Wait a while to be sure that a Vsync event is received */
+ msleep(WAIT_NEXT_VSYNC_MS);
+
+ /* Then disable layer itself */
+ sti_layer_disable(layer);
+
+ drm_vblank_off(crtc->dev, mixer->id);
+
+ /* Disable pixel clock and compo IP clocks */
+ if (mixer->id == STI_MIXER_MAIN) {
+ clk_disable_unprepare(compo->clk_pix_main);
+ clk_disable_unprepare(compo->clk_compo_main);
+ } else {
+ clk_disable_unprepare(compo->clk_pix_aux);
+ clk_disable_unprepare(compo->clk_compo_aux);
+ }
+
+ compo->enable = false;
+}
+
+static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
+ .dpms = sti_drm_crtc_dpms,
+ .prepare = sti_drm_crtc_prepare,
+ .commit = sti_drm_crtc_commit,
+ .mode_fixup = sti_drm_crtc_mode_fixup,
+ .mode_set = sti_drm_crtc_mode_set,
+ .mode_set_base = sti_drm_crtc_mode_set_base,
+ .load_lut = sti_drm_crtc_load_lut,
+ .disable = sti_drm_crtc_disable,
+};
+
+static int sti_drm_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct drm_device *drm_dev = crtc->dev;
+ struct drm_framebuffer *old_fb;
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ unsigned long flags;
+ int ret;
+
+ DRM_DEBUG_KMS("fb %d --> fb %d\n",
+ crtc->primary->fb->base.id, fb->base.id);
+
+ mutex_lock(&drm_dev->struct_mutex);
+
+ old_fb = crtc->primary->fb;
+ crtc->primary->fb = fb;
+ ret = sti_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+ if (ret) {
+ DRM_ERROR("failed\n");
+ crtc->primary->fb = old_fb;
+ goto out;
+ }
+
+ if (event) {
+ event->pipe = mixer->id;
+
+ ret = drm_vblank_get(drm_dev, event->pipe);
+ if (ret) {
+ DRM_ERROR("Cannot get vblank\n");
+ goto out;
+ }
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (mixer->pending_event) {
+ drm_vblank_put(drm_dev, event->pipe);
+ ret = -EBUSY;
+ } else {
+ mixer->pending_event = event;
+ }
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+ }
+out:
+ mutex_unlock(&drm_dev->struct_mutex);
+ return ret;
+}
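+
+/*
+ * Note: a pending event stored by the page flip above is completed from
+ * the VTG notifier path: sti_drm_crtc_vblank_cb() sends it on the next
+ * top/bottom field event and releases the vblank reference taken here.
+ */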
+
+static void sti_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+ DRM_DEBUG_KMS("\n");
+ drm_crtc_cleanup(crtc);
+}
+
+static int sti_drm_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ uint64_t val)
+{
+ DRM_DEBUG_KMS("\n");
+ return 0;
+}
+
+int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct drm_device *drm_dev;
+ struct sti_compositor *compo =
+ container_of(nb, struct sti_compositor, vtg_vblank_nb);
+ int *crtc = data;
+ unsigned long flags;
+
+ drm_dev = compo->mixer[*crtc]->drm_crtc.dev;
+
+ if ((event != VTG_TOP_FIELD_EVENT) &&
+ (event != VTG_BOTTOM_FIELD_EVENT)) {
+ DRM_ERROR("unknown event: %lu\n", event);
+ return -EINVAL;
+ }
+
+ drm_handle_vblank(drm_dev, *crtc);
+
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+ if (compo->mixer[*crtc]->pending_event) {
+ drm_send_vblank_event(drm_dev, -1,
+ compo->mixer[*crtc]->pending_event);
+ drm_vblank_put(drm_dev, *crtc);
+ compo->mixer[*crtc]->pending_event = NULL;
+ }
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+
+ return 0;
+}
+
+int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+ struct sti_drm_private *dev_priv = dev->dev_private;
+ struct sti_compositor *compo = dev_priv->compo;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+
+ if (sti_vtg_register_client(crtc == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ vtg_vblank_nb, crtc)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sti_drm_crtc_enable_vblank);
+
+void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+{
+ struct sti_drm_private *priv = dev->dev_private;
+ struct sti_compositor *compo = priv->compo;
+ struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
+ unsigned long flags;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (sti_vtg_unregister_client(crtc == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, vtg_vblank_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ /* free the resources of the pending requests */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (compo->mixer[crtc]->pending_event) {
+ drm_vblank_put(dev, crtc);
+ compo->mixer[crtc]->pending_event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
+
+static struct drm_crtc_funcs sti_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = sti_drm_crtc_page_flip,
+ .destroy = sti_drm_crtc_destroy,
+ .set_property = sti_drm_crtc_set_property,
+};
+
+bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
+{
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+
+ if (mixer->id == STI_MIXER_MAIN)
+ return true;
+
+ return false;
+}
+
+int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ struct drm_crtc *crtc = &mixer->drm_crtc;
+ int res;
+
+ res = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+ &sti_crtc_funcs);
+ if (res) {
+ DRM_ERROR("Can not initialze CRTC\n");
+ return -EINVAL;
+ }
+
+ drm_crtc_helper_add(crtc, &sti_crtc_helper_funcs);
+
+ DRM_DEBUG_DRIVER("drm CRTC:%d mapped to %s\n",
+ crtc->base.id, sti_mixer_to_str(mixer));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.h b/drivers/gpu/drm/sti/sti_drm_crtc.h
new file mode 100644
index 000000000000..caca8b14f017
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_CRTC_H_
+#define _STI_DRM_CRTC_H_
+
+#include <drm/drmP.h>
+
+struct sti_mixer;
+
+int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
+ struct drm_plane *primary, struct drm_plane *cursor);
+int sti_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+int sti_drm_crtc_vblank_cb(struct notifier_block *nb,
+ unsigned long event, void *data);
+bool sti_drm_crtc_is_main(struct drm_crtc *drm_crtc);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
new file mode 100644
index 000000000000..a7cc24917a96
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include <linux/component.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_drm_drv.h"
+#include "sti_drm_crtc.h"
+
+#define DRIVER_NAME "sti"
+#define DRIVER_DESC "STMicroelectronics SoC DRM"
+#define DRIVER_DATE "20140601"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+#define STI_MAX_FB_HEIGHT 4096
+#define STI_MAX_FB_WIDTH 4096
+
+static struct drm_mode_config_funcs sti_drm_mode_config_funcs = {
+ .fb_create = drm_fb_cma_create,
+};
+
+static void sti_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * set max width and height as default values;
+ * they are used to check the framebuffer size limits
+ * in drm_mode_addfb().
+ */
+ dev->mode_config.max_width = STI_MAX_FB_WIDTH;
+ dev->mode_config.max_height = STI_MAX_FB_HEIGHT;
+
+ dev->mode_config.funcs = &sti_drm_mode_config_funcs;
+}
+
+static int sti_drm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct sti_drm_private *private;
+ int ret;
+
+ private = kzalloc(sizeof(struct sti_drm_private), GFP_KERNEL);
+ if (!private) {
+ DRM_ERROR("Failed to allocate private\n");
+ return -ENOMEM;
+ }
+ dev->dev_private = (void *)private;
+ private->drm_dev = dev;
+
+ drm_mode_config_init(dev);
+ drm_kms_helper_poll_init(dev);
+
+ sti_drm_mode_config_init(dev);
+
+ ret = component_bind_all(dev->dev, dev);
+ if (ret) {
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ kfree(private);
+ return ret;
+ }
+
+ drm_helper_disable_unused_functions(dev);
+
+#ifdef CONFIG_DRM_STI_FBDEV
+ drm_fbdev_cma_init(dev, 32,
+ dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+#endif
+ return 0;
+}
+
+static const struct file_operations sti_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = drm_gem_cma_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .release = drm_release,
+};
+
+static struct dma_buf *sti_drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags)
+{
+ /* we want to be able to write in the mmapped buffer */
+ flags |= O_RDWR;
+ return drm_gem_prime_export(dev, obj, flags);
+}
+
+static struct drm_driver sti_drm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ DRIVER_GEM | DRIVER_PRIME,
+ .load = sti_drm_load,
+ .gem_free_object = drm_gem_cma_free_object,
+ .gem_vm_ops = &drm_gem_cma_vm_ops,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .fops = &sti_drm_driver_fops,
+
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = sti_drm_crtc_enable_vblank,
+ .disable_vblank = sti_drm_crtc_disable_vblank,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = sti_drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_vmap = drm_gem_cma_prime_vmap,
+ .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+ .gem_prime_mmap = drm_gem_cma_prime_mmap,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sti_drm_bind(struct device *dev)
+{
+ return drm_platform_init(&sti_drm_driver, to_platform_device(dev));
+}
+
+static void sti_drm_unbind(struct device *dev)
+{
+ drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops sti_drm_ops = {
+ .bind = sti_drm_bind,
+ .unbind = sti_drm_unbind,
+};
+
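+/*
+ * The master device matches every available DT child of the display
+ * subsystem node (e.g. the compositor) through compare_of(); the DRM
+ * device itself is only created once all components are bound, see
+ * sti_drm_bind().
+ */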
+static int sti_drm_master_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct device_node *child_np;
+ struct component_match *match = NULL;
+
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+ child_np = of_get_next_available_child(node, NULL);
+
+ while (child_np) {
+ component_match_add(dev, &match, compare_of, child_np);
+ of_node_put(child_np);
+ child_np = of_get_next_available_child(node, child_np);
+ }
+
+ return component_master_add_with_match(dev, &sti_drm_ops, match);
+}
+
+static int sti_drm_master_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &sti_drm_ops);
+ return 0;
+}
+
+static struct platform_driver sti_drm_master_driver = {
+ .probe = sti_drm_master_probe,
+ .remove = sti_drm_master_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME "__master",
+ },
+};
+
+static int sti_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct platform_device *master;
+
+ of_platform_populate(node, NULL, NULL, dev);
+
+ platform_driver_register(&sti_drm_master_driver);
+ master = platform_device_register_resndata(dev,
+ DRIVER_NAME "__master", -1,
+ NULL, 0, NULL, 0);
+ if (IS_ERR(master)) {
+ platform_driver_unregister(&sti_drm_master_driver);
+ return PTR_ERR(master);
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+}
+
+static int sti_drm_platform_remove(struct platform_device *pdev)
+{
+ struct platform_device *master = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(&pdev->dev);
+ platform_device_unregister(master);
+ platform_driver_unregister(&sti_drm_master_driver);
+ return 0;
+}
+
+static const struct of_device_id sti_drm_dt_ids[] = {
+ { .compatible = "st,sti-display-subsystem", },
+ { /* end node */ },
+};
+MODULE_DEVICE_TABLE(of, sti_drm_dt_ids);
+
+static struct platform_driver sti_drm_platform_driver = {
+ .probe = sti_drm_platform_probe,
+ .remove = sti_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = sti_drm_dt_ids,
+ },
+};
+
+module_platform_driver(sti_drm_platform_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.h b/drivers/gpu/drm/sti/sti_drm_drv.h
new file mode 100644
index 000000000000..ec5e2eb8dff9
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_drv.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_DRV_H_
+#define _STI_DRM_DRV_H_
+
+#include <drm/drmP.h>
+
+struct sti_compositor;
+struct sti_tvout;
+
+/**
+ * STI drm private structure
+ * This structure is stored as private in the drm_device
+ *
+ * @compo: compositor
+ * @plane_zorder_property: z-order property for CRTC planes
+ * @drm_dev: drm device
+ */
+struct sti_drm_private {
+ struct sti_compositor *compo;
+ struct drm_property *plane_zorder_property;
+ struct drm_device *drm_dev;
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
new file mode 100644
index 000000000000..f4118d4cac22
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_compositor.h"
+#include "sti_drm_drv.h"
+#include "sti_drm_plane.h"
+#include "sti_vtg.h"
+
+enum sti_layer_desc sti_layer_default_zorder[] = {
+ STI_GDP_0,
+ STI_VID_0,
+ STI_GDP_1,
+ STI_VID_1,
+ STI_GDP_2,
+ STI_GDP_3,
+};
+
+/* (Background) < GDP0 < VID0 < GDP1 < VID1 < GDP2 < GDP3 < (ForeGround) */
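+/*
+ * A plane default z-order is its index in the array above (e.g. 2 for
+ * STI_GDP_1); this is also the initial value of the "zpos" property
+ * attached to overlay planes in sti_drm_plane_attach_zorder_property().
+ */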
+
+static int
+sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct sti_layer *layer = to_sti_layer(plane);
+ struct sti_mixer *mixer = to_sti_mixer(crtc);
+ int res;
+
+ DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s) drm fb:%d\n",
+ crtc->base.id, sti_mixer_to_str(mixer),
+ plane->base.id, sti_layer_to_str(layer), fb->base.id);
+ DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", crtc_w, crtc_h, crtc_x, crtc_y);
+
+ res = sti_mixer_set_layer_depth(mixer, layer);
+ if (res) {
+ DRM_ERROR("Can not set layer depth\n");
+ return res;
+ }
+
+ /* src_x/y/w/h are in 16.16 fixed point; shift down to integer pixels */
+ res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x >> 16, src_y >> 16,
+ src_w >> 16, src_h >> 16);
+ if (res) {
+ DRM_ERROR("Layer prepare failed\n");
+ return res;
+ }
+
+ res = sti_layer_commit(layer);
+ if (res) {
+ DRM_ERROR("Layer commit failed\n");
+ return res;
+ }
+
+ res = sti_mixer_set_layer_status(mixer, layer, true);
+ if (res) {
+ DRM_ERROR("Can not enable layer at mixer\n");
+ return res;
+ }
+
+ return 0;
+}
+
+static int sti_drm_disable_plane(struct drm_plane *plane)
+{
+ struct sti_layer *layer;
+ struct sti_mixer *mixer;
+ int lay_res, mix_res;
+
+ if (!plane->crtc) {
+ DRM_DEBUG_DRIVER("drm plane:%d not enabled\n", plane->base.id);
+ return 0;
+ }
+ layer = to_sti_layer(plane);
+ mixer = to_sti_mixer(plane->crtc);
+
+ DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
+ plane->crtc->base.id, sti_mixer_to_str(mixer),
+ plane->base.id, sti_layer_to_str(layer));
+
+ /* Disable layer at mixer level */
+ mix_res = sti_mixer_set_layer_status(mixer, layer, false);
+ if (mix_res)
+ DRM_ERROR("Can not disable layer at mixer\n");
+
+ /* Wait a while to be sure that a Vsync event is received */
+ msleep(WAIT_NEXT_VSYNC_MS);
+
+ /* Then disable layer itself */
+ lay_res = sti_layer_disable(layer);
+ if (lay_res)
+ DRM_ERROR("Layer disable failed\n");
+
+ if (lay_res || mix_res)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void sti_drm_plane_destroy(struct drm_plane *plane)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ sti_drm_disable_plane(plane);
+ drm_plane_cleanup(plane);
+}
+
+static int sti_drm_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sti_drm_private *private = dev->dev_private;
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (property == private->plane_zorder_property) {
+ layer->zorder = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct drm_plane_funcs sti_drm_plane_funcs = {
+ .update_plane = sti_drm_update_plane,
+ .disable_plane = sti_drm_disable_plane,
+ .destroy = sti_drm_plane_destroy,
+ .set_property = sti_drm_plane_set_property,
+};
+
+static void sti_drm_plane_attach_zorder_property(struct drm_plane *plane,
+ uint64_t default_val)
+{
+ struct drm_device *dev = plane->dev;
+ struct sti_drm_private *private = dev->dev_private;
+ struct drm_property *prop;
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ prop = private->plane_zorder_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "zpos", 0,
+ GAM_MIXER_NB_DEPTH_LEVEL - 1);
+ if (!prop)
+ return;
+
+ private->plane_zorder_property = prop;
+ }
+
+ drm_object_attach_property(&plane->base, prop, default_val);
+ layer->zorder = default_val;
+}
+
+struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
+ struct sti_layer *layer,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type)
+{
+ int err, i;
+ uint64_t default_zorder = 0;
+
+ err = drm_universal_plane_init(dev, &layer->plane, possible_crtcs,
+ &sti_drm_plane_funcs,
+ sti_layer_get_formats(layer),
+ sti_layer_get_nb_formats(layer), type);
+ if (err) {
+ DRM_ERROR("Failed to initialize plane\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sti_layer_default_zorder); i++)
+ if (sti_layer_default_zorder[i] == layer->desc)
+ break;
+
+ default_zorder = i;
+
+ if (type == DRM_PLANE_TYPE_OVERLAY)
+ sti_drm_plane_attach_zorder_property(&layer->plane,
+ default_zorder);
+
+ DRM_DEBUG_DRIVER("drm plane:%d mapped to %s with zorder:%llu\n",
+ layer->plane.base.id,
+ sti_layer_to_str(layer), default_zorder);
+
+ return &layer->plane;
+}
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.h b/drivers/gpu/drm/sti/sti_drm_plane.h
new file mode 100644
index 000000000000..4f191839f2a7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_drm_plane.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_DRM_PLANE_H_
+#define _STI_DRM_PLANE_H_
+
+#include <drm/drmP.h>
+
+struct sti_layer;
+
+struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
+ struct sti_layer *layer,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type);
+#endif
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
new file mode 100644
index 000000000000..4e30b74559f5
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -0,0 +1,549 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+
+#include "sti_compositor.h"
+#include "sti_gdp.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+#define ENA_COLOR_FILL BIT(8)
+#define WAIT_NEXT_VSYNC BIT(31)
+
+/* GDP color formats */
+#define GDP_RGB565 0x00
+#define GDP_RGB888 0x01
+#define GDP_RGB888_32 0x02
+#define GDP_ARGB8565 0x04
+#define GDP_ARGB8888 0x05
+#define GDP_ARGB1555 0x06
+#define GDP_ARGB4444 0x07
+#define GDP_CLUT8 0x0B
+#define GDP_YCBR888 0x10
+#define GDP_YCBR422R 0x12
+#define GDP_AYCBR8888 0x15
+
+#define GAM_GDP_CTL_OFFSET 0x00
+#define GAM_GDP_AGC_OFFSET 0x04
+#define GAM_GDP_VPO_OFFSET 0x0C
+#define GAM_GDP_VPS_OFFSET 0x10
+#define GAM_GDP_PML_OFFSET 0x14
+#define GAM_GDP_PMP_OFFSET 0x18
+#define GAM_GDP_SIZE_OFFSET 0x1C
+#define GAM_GDP_NVN_OFFSET 0x24
+#define GAM_GDP_KEY1_OFFSET 0x28
+#define GAM_GDP_KEY2_OFFSET 0x2C
+#define GAM_GDP_PPT_OFFSET 0x34
+#define GAM_GDP_CML_OFFSET 0x3C
+#define GAM_GDP_MST_OFFSET 0x68
+
+#define GAM_GDP_ALPHARANGE_255 BIT(5)
+#define GAM_GDP_AGC_FULL_RANGE 0x00808080
+#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
+#define GAM_GDP_SIZE_MAX 0x7FF
+
+#define GDP_NODE_NB_BANK 2
+#define GDP_NODE_PER_FIELD 2
+
+struct sti_gdp_node {
+ u32 gam_gdp_ctl;
+ u32 gam_gdp_agc;
+ u32 reserved1;
+ u32 gam_gdp_vpo;
+ u32 gam_gdp_vps;
+ u32 gam_gdp_pml;
+ u32 gam_gdp_pmp;
+ u32 gam_gdp_size;
+ u32 reserved2;
+ u32 gam_gdp_nvn;
+ u32 gam_gdp_key1;
+ u32 gam_gdp_key2;
+ u32 reserved3;
+ u32 gam_gdp_ppt;
+ u32 reserved4;
+ u32 gam_gdp_cml;
+};
+
+struct sti_gdp_node_list {
+ struct sti_gdp_node *top_field;
+ struct sti_gdp_node *btm_field;
+};
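+
+/*
+ * The two banks of nodes are used in a ping-pong fashion: while the HW
+ * walks the node list of one bank (each node points to the next through
+ * its NVN field), the driver updates the other bank and then links it in
+ * by rewriting NVN, e.g.:
+ *
+ *   bank 0: top_field <-> btm_field   (being read by the HW)
+ *   bank 1: top_field <-> btm_field   (being updated by the driver)
+ *
+ * see sti_gdp_prepare_layer() and sti_gdp_commit_layer().
+ */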
+
+/**
+ * STI GDP structure
+ *
+ * @layer: layer structure
+ * @clk_pix: pixel clock for the current gdp
+ * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
+ * @is_curr_top: true if the current node processed is the top field
+ * @node_list: array of node list
+ */
+struct sti_gdp {
+ struct sti_layer layer;
+ struct clk *clk_pix;
+ struct notifier_block vtg_field_nb;
+ bool is_curr_top;
+ struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
+};
+
+#define to_sti_gdp(x) container_of(x, struct sti_gdp, layer)
+
+static const uint32_t gdp_supported_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_AYUV,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_C8,
+};
+
+static const uint32_t *sti_gdp_get_formats(struct sti_layer *layer)
+{
+ return gdp_supported_formats;
+}
+
+static unsigned int sti_gdp_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(gdp_supported_formats);
+}
+
+static int sti_gdp_fourcc2format(int fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_XRGB8888:
+ return GDP_RGB888_32;
+ case DRM_FORMAT_ARGB8888:
+ return GDP_ARGB8888;
+ case DRM_FORMAT_ARGB4444:
+ return GDP_ARGB4444;
+ case DRM_FORMAT_ARGB1555:
+ return GDP_ARGB1555;
+ case DRM_FORMAT_RGB565:
+ return GDP_RGB565;
+ case DRM_FORMAT_RGB888:
+ return GDP_RGB888;
+ case DRM_FORMAT_AYUV:
+ return GDP_AYCBR8888;
+ case DRM_FORMAT_YUV444:
+ return GDP_YCBR888;
+ case DRM_FORMAT_VYUY:
+ return GDP_YCBR422R;
+ case DRM_FORMAT_C8:
+ return GDP_CLUT8;
+ }
+ return -1;
+}
+
+static int sti_gdp_get_alpharange(int format)
+{
+ switch (format) {
+ case GDP_ARGB8565:
+ case GDP_ARGB8888:
+ case GDP_AYCBR8888:
+ return GAM_GDP_ALPHARANGE_255;
+ }
+ return 0;
+}
+
+/**
+ * sti_gdp_get_free_nodes
+ * @layer: gdp layer
+ *
+ * Look for a GDP node list that is not currently read by the HW.
+ *
+ * RETURNS:
+ * Pointer to the free GDP node list
+ */
+static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
+{
+ u32 hw_nvn;
+ void *virt_nvn;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ unsigned int i;
+
+ hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ if (!hw_nvn)
+ goto end;
+
+ virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++)
+ if ((virt_nvn != gdp->node_list[i].btm_field) &&
+ (virt_nvn != gdp->node_list[i].top_field))
+ return &gdp->node_list[i];
+
+ /* in hazardous cases, restart with the first node */
+ DRM_ERROR("inconsistent NVN for %s: 0x%08X\n",
+ sti_layer_to_str(layer), hw_nvn);
+
+end:
+ return &gdp->node_list[0];
+}
+
+/**
+ * sti_gdp_get_current_nodes
+ * @layer: GDP layer
+ *
+ * Look for GDP nodes that are currently read by the HW.
+ *
+ * RETURNS:
+ * Pointer to the current GDP node list
+ */
+static
+struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
+{
+ u32 hw_nvn;
+ void *virt_nvn;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ unsigned int i;
+
+ hw_nvn = readl(layer->regs + GAM_GDP_NVN_OFFSET);
+ if (!hw_nvn)
+ goto end;
+
+ virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++)
+ if ((virt_nvn == gdp->node_list[i].btm_field) ||
+ (virt_nvn == gdp->node_list[i].top_field))
+ return &gdp->node_list[i];
+
+end:
+ DRM_DEBUG_DRIVER("Warning, NVN 0x%08X for %s does not match any node\n",
+ hw_nvn, sti_layer_to_str(layer));
+
+ return NULL;
+}
+
+/**
+ * sti_gdp_prepare_layer
+ * @layer: gdp layer
+ * @first_prepare: true if it is the first time this function is called
+ *
+ * Update the free GDP node list according to the layer properties.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_gdp_node_list *list;
+ struct sti_gdp_node *top_field, *btm_field;
+ struct drm_display_mode *mode = layer->mode;
+ struct device *dev = layer->dev;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct sti_compositor *compo = dev_get_drvdata(dev);
+ int format;
+ unsigned int depth, bpp;
+ int rate = mode->clock * 1000;
+ int res;
+ u32 ydo, xdo, yds, xds;
+
+ list = sti_gdp_get_free_nodes(layer);
+ top_field = list->top_field;
+ btm_field = list->btm_field;
+
+ dev_dbg(dev, "%s %s top_node:0x%p btm_node:0x%p\n", __func__,
+ sti_layer_to_str(layer), top_field, btm_field);
+
+ /* Build the top field from layer params */
+ top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
+ top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
+ format = sti_gdp_fourcc2format(layer->format);
+ if (format == -1) {
+ DRM_ERROR("Format not supported by GDP %.4s\n",
+ (char *)&layer->format);
+ return -EINVAL;
+ }
+ top_field->gam_gdp_ctl |= format;
+ top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
+ top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
+
+ /* pixel memory location */
+ drm_fb_get_bpp_depth(layer->format, &depth, &bpp);
+ top_field->gam_gdp_pml = (u32) layer->paddr + layer->offsets[0];
+ top_field->gam_gdp_pml += layer->src_x * (bpp >> 3);
+ top_field->gam_gdp_pml += layer->src_y * layer->pitches[0];
+
+ /* input parameters */
+ top_field->gam_gdp_pmp = layer->pitches[0];
+ top_field->gam_gdp_size =
+ clamp_val(layer->src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
+ clamp_val(layer->src_w, 0, GAM_GDP_SIZE_MAX);
+
+ /* output parameters */
+ ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
+ yds = sti_vtg_get_line_number(*mode, layer->dst_y + layer->dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, layer->dst_x + layer->dst_w - 1);
+ top_field->gam_gdp_vpo = (ydo << 16) | xdo;
+ top_field->gam_gdp_vps = (yds << 16) | xds;
+
+ /* Same content and chained together */
+ memcpy(btm_field, top_field, sizeof(*btm_field));
+ top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
+ btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);
+
+ /* Interlaced mode */
+ if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
+ btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
+ layer->pitches[0];
+
+ if (first_prepare) {
+ /* Register gdp callback */
+ if (sti_vtg_register_client(layer->mixer_id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux,
+ &gdp->vtg_field_nb, layer->mixer_id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -EINVAL;
+ }
+
+ /* Set and enable gdp clock */
+ if (gdp->clk_pix) {
+ res = clk_set_rate(gdp->clk_pix, rate);
+ if (res < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+ rate);
+ return -EINVAL;
+ }
+
+ if (clk_prepare_enable(gdp->clk_pix)) {
+ DRM_ERROR("Failed to prepare/enable gdp\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * sti_gdp_commit_layer
+ * @layer: gdp layer
+ *
+ * Update the NVN field of the 'right' field of the current GDP node (being
+ * used by the HW) with the address of the updated ('free') top field GDP node.
+ * - In interlaced mode the 'right' field is the bottom field as we update
+ * frames starting from their top field
+ * - In progressive mode, we update both bottom and top fields which are
+ * equal nodes.
+ * At the next VSYNC, the updated node list will be used by the HW.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_commit_layer(struct sti_layer *layer)
+{
+ struct sti_gdp_node_list *updated_list = sti_gdp_get_free_nodes(layer);
+ struct sti_gdp_node *updated_top_node = updated_list->top_field;
+ struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
+ u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
+ struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
+
+ dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
+ sti_layer_to_str(layer),
+ updated_top_node, updated_btm_node);
+ dev_dbg(layer->dev, "Current NVN:0x%X\n",
+ readl(layer->regs + GAM_GDP_NVN_OFFSET));
+ dev_dbg(layer->dev, "Posted buff: %lx current buff: %x\n",
+ (unsigned long)layer->paddr,
+ readl(layer->regs + GAM_GDP_PML_OFFSET));
+
+ if (curr_list == NULL) {
+ /* First update or invalid node should directly write in the
+ * hw register */
+ DRM_DEBUG_DRIVER("%s first update (or invalid node)",
+ sti_layer_to_str(layer));
+
+ writel(gdp->is_curr_top ?
+ dma_updated_btm : dma_updated_top,
+ layer->regs + GAM_GDP_NVN_OFFSET);
+ return 0;
+ }
+
+ if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ if (gdp->is_curr_top) {
+ /* Do not update in the middle of the frame, but
+ * postpone the update after the bottom field has
+ * been displayed */
+ curr_list->btm_field->gam_gdp_nvn = dma_updated_top;
+ } else {
+ /* Direct update to avoid one frame delay */
+ writel(dma_updated_top,
+ layer->regs + GAM_GDP_NVN_OFFSET);
+ }
+ } else {
+ /* Direct update for progressive to avoid one frame delay */
+ writel(dma_updated_top, layer->regs + GAM_GDP_NVN_OFFSET);
+ }
+
+ return 0;
+}
+
+/**
+ * sti_gdp_disable_layer
+ * @layer: gdp layer
+ *
+ * Disable a GDP.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_disable_layer(struct sti_layer *layer)
+{
+ unsigned int i;
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct sti_compositor *compo = dev_get_drvdata(layer->dev);
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+
+ /* Set the nodes as 'to be ignored on mixer' */
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ gdp->node_list[i].top_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
+ gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
+ }
+
+ if (sti_vtg_unregister_client(layer->mixer_id == STI_MIXER_MAIN ?
+ compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ if (gdp->clk_pix)
+ clk_disable_unprepare(gdp->clk_pix);
+
+ return 0;
+}
+
+/**
+ * sti_gdp_field_cb
+ * @nb: notifier block
+ * @event: event message
+ * @data: private data
+ *
+ * Handle VTG top field and bottom field event.
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_gdp_field_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct sti_gdp *gdp = container_of(nb, struct sti_gdp, vtg_field_nb);
+
+ switch (event) {
+ case VTG_TOP_FIELD_EVENT:
+ gdp->is_curr_top = true;
+ break;
+ case VTG_BOTTOM_FIELD_EVENT:
+ gdp->is_curr_top = false;
+ break;
+ default:
+ DRM_ERROR("unsupported event: %lu\n", event);
+ break;
+ }
+
+ return 0;
+}
+
+static void sti_gdp_init(struct sti_layer *layer)
+{
+ struct sti_gdp *gdp = to_sti_gdp(layer);
+ struct device_node *np = layer->dev->of_node;
+ dma_addr_t dma;
+ void *base;
+ unsigned int i, size;
+
+ /* Allocate all the nodes within a single memory page */
+ size = sizeof(struct sti_gdp_node) *
+ GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
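+ /* (here: GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK = 4 nodes of 64 bytes) */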
+
+ base = dma_alloc_writecombine(layer->dev,
+ size, &dma, GFP_KERNEL | GFP_DMA);
+ if (!base) {
+ DRM_ERROR("Failed to allocate memory for GDP node\n");
+ return;
+ }
+ memset(base, 0, size);
+
+ for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+ if (virt_to_dma(layer->dev, base) & 0xF) {
+ DRM_ERROR("Mem alignment failed\n");
+ return;
+ }
+ gdp->node_list[i].top_field = base;
+ DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
+ base += sizeof(struct sti_gdp_node);
+
+ if (virt_to_dma(layer->dev, base) & 0xF) {
+ DRM_ERROR("Mem alignment failed\n");
+ return;
+ }
+ gdp->node_list[i].btm_field = base;
+ DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
+ base += sizeof(struct sti_gdp_node);
+ }
+
+ if (of_device_is_compatible(np, "st,stih407-compositor")) {
+ /* each GDP of the STiH407 chip has its own pixel clock */
+ char *clk_name;
+
+ switch (layer->desc) {
+ case STI_GDP_0:
+ clk_name = "pix_gdp1";
+ break;
+ case STI_GDP_1:
+ clk_name = "pix_gdp2";
+ break;
+ case STI_GDP_2:
+ clk_name = "pix_gdp3";
+ break;
+ case STI_GDP_3:
+ clk_name = "pix_gdp4";
+ break;
+ default:
+ DRM_ERROR("GDP id not recognized\n");
+ return;
+ }
+
+ gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
+ if (IS_ERR(gdp->clk_pix))
+ DRM_ERROR("Cannot get %s clock\n", clk_name);
+ }
+}
+
+static const struct sti_layer_funcs gdp_ops = {
+ .get_formats = sti_gdp_get_formats,
+ .get_nb_formats = sti_gdp_get_nb_formats,
+ .init = sti_gdp_init,
+ .prepare = sti_gdp_prepare_layer,
+ .commit = sti_gdp_commit_layer,
+ .disable = sti_gdp_disable_layer,
+};
+
+struct sti_layer *sti_gdp_create(struct device *dev, int id)
+{
+ struct sti_gdp *gdp;
+
+ gdp = devm_kzalloc(dev, sizeof(*gdp), GFP_KERNEL);
+ if (!gdp) {
+ DRM_ERROR("Failed to allocate memory for GDP\n");
+ return NULL;
+ }
+
+ gdp->layer.ops = &gdp_ops;
+ gdp->vtg_field_nb.notifier_call = sti_gdp_field_cb;
+
+ return (struct sti_layer *)gdp;
+}
diff --git a/drivers/gpu/drm/sti/sti_gdp.h b/drivers/gpu/drm/sti/sti_gdp.h
new file mode 100644
index 000000000000..1dab68274ad3
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_gdp.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_GDP_H_
+#define _STI_GDP_H_
+
+#include <linux/types.h>
+
+struct device;
+struct sti_layer;
+
+struct sti_layer *sti_gdp_create(struct device *dev, int id);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
new file mode 100644
index 000000000000..72d957f81c05
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -0,0 +1,794 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+/* HDformatter registers */
+#define HDA_ANA_CFG 0x0000
+#define HDA_ANA_SCALE_CTRL_Y 0x0004
+#define HDA_ANA_SCALE_CTRL_CB 0x0008
+#define HDA_ANA_SCALE_CTRL_CR 0x000C
+#define HDA_ANA_ANC_CTRL 0x0010
+#define HDA_ANA_SRC_Y_CFG 0x0014
+#define HDA_COEFF_Y_PH1_TAP123 0x0018
+#define HDA_COEFF_Y_PH1_TAP456 0x001C
+#define HDA_COEFF_Y_PH2_TAP123 0x0020
+#define HDA_COEFF_Y_PH2_TAP456 0x0024
+#define HDA_COEFF_Y_PH3_TAP123 0x0028
+#define HDA_COEFF_Y_PH3_TAP456 0x002C
+#define HDA_COEFF_Y_PH4_TAP123 0x0030
+#define HDA_COEFF_Y_PH4_TAP456 0x0034
+#define HDA_ANA_SRC_C_CFG 0x0040
+#define HDA_COEFF_C_PH1_TAP123 0x0044
+#define HDA_COEFF_C_PH1_TAP456 0x0048
+#define HDA_COEFF_C_PH2_TAP123 0x004C
+#define HDA_COEFF_C_PH2_TAP456 0x0050
+#define HDA_COEFF_C_PH3_TAP123 0x0054
+#define HDA_COEFF_C_PH3_TAP456 0x0058
+#define HDA_COEFF_C_PH4_TAP123 0x005C
+#define HDA_COEFF_C_PH4_TAP456 0x0060
+#define HDA_SYNC_AWGI 0x0300
+
+/* HDA_ANA_CFG */
+#define CFG_AWG_ASYNC_EN BIT(0)
+#define CFG_AWG_ASYNC_HSYNC_MTD BIT(1)
+#define CFG_AWG_ASYNC_VSYNC_MTD BIT(2)
+#define CFG_AWG_SYNC_DEL BIT(3)
+#define CFG_AWG_FLTR_MODE_SHIFT 4
+#define CFG_AWG_FLTR_MODE_MASK (0xF << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_SD (0 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_ED (1 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_AWG_FLTR_MODE_HD (2 << CFG_AWG_FLTR_MODE_SHIFT)
+#define CFG_SYNC_ON_PBPR_MASK BIT(8)
+#define CFG_PREFILTER_EN_MASK BIT(9)
+#define CFG_PBPR_SYNC_OFF_SHIFT 16
+#define CFG_PBPR_SYNC_OFF_MASK (0x7FF << CFG_PBPR_SYNC_OFF_SHIFT)
+#define CFG_PBPR_SYNC_OFF_VAL 0x117 /* Voltage dependent. stiH416 */
+
+/* Default scaling values */
+#define SCALE_CTRL_Y_DFLT 0x00C50256
+#define SCALE_CTRL_CB_DFLT 0x00DB0249
+#define SCALE_CTRL_CR_DFLT 0x00DB0249
+
+/* Video DACs control */
+#define VIDEO_DACS_CONTROL_MASK 0x0FFF
+#define VIDEO_DACS_CONTROL_SYSCFG2535 0x085C /* for stih416 */
+#define DAC_CFG_HD_OFF_SHIFT 5
+#define DAC_CFG_HD_OFF_MASK (0x7 << DAC_CFG_HD_OFF_SHIFT)
+#define VIDEO_DACS_CONTROL_SYSCFG5072 0x0120 /* for stih407 */
+#define DAC_CFG_HD_HZUVW_OFF_MASK BIT(1)
+
+/* Upsampler values for the alternative 2X Filter */
+#define SAMPLER_COEF_NB 8
+#define HDA_ANA_SRC_Y_CFG_ALT_2X 0x01130000
+static u32 coef_y_alt_2x[] = {
+ 0x00FE83FB, 0x1F900401, 0x00000000, 0x00000000,
+ 0x00F408F9, 0x055F7C25, 0x00000000, 0x00000000
+};
+
+#define HDA_ANA_SRC_C_CFG_ALT_2X 0x01750004
+static u32 coef_c_alt_2x[] = {
+ 0x001305F7, 0x05274BD0, 0x00000000, 0x00000000,
+ 0x0004907C, 0x09C80B9D, 0x00000000, 0x00000000
+};
+
+/* Upsampler values for the 4X Filter */
+#define HDA_ANA_SRC_Y_CFG_4X 0x01ED0005
+#define HDA_ANA_SRC_C_CFG_4X 0x01ED0004
+static u32 coef_yc_4x[] = {
+ 0x00FC827F, 0x008FE20B, 0x00F684FC, 0x050F7C24,
+ 0x00F4857C, 0x0A1F402E, 0x00FA027F, 0x0E076E1D
+};
+
+/* AWG instructions for some video modes */
+#define AWG_MAX_INST 64
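+/*
+ * One instruction table per supported mode follows; the selected table
+ * is written to the HDA_SYNC_AWGI registers and zero-padded up to
+ * AWG_MAX_INST entries by sti_hda_configure_awg().
+ */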
+
+/* 720p@50 */
+static u32 AWGi_720p_50[] = {
+ 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
+ 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
+ 0x00000D8E, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C26, 0x0000003B, 0x00000FB4, 0x00000FB5,
+ 0x00000104, 0x00001AE8
+};
+
+#define NN_720p_50 ARRAY_SIZE(AWGi_720p_50)
+
+/* 720p@60 */
+static u32 AWGi_720p_60[] = {
+ 0x00000971, 0x00000C26, 0x0000013B, 0x00000CDA,
+ 0x00000104, 0x00000E7E, 0x00000E7F, 0x0000013B,
+ 0x00000C44, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C26, 0x0000003B, 0x00000F0F, 0x00000F10,
+ 0x00000104, 0x00001AE8
+};
+
+#define NN_720p_60 ARRAY_SIZE(AWGi_720p_60)
+
+/* 1080p@30 */
+static u32 AWGi_1080p_30[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000C2A, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000EBE, 0x00000EBF,
+ 0x00000EBF, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_30 ARRAY_SIZE(AWGi_1080p_30)
+
+/* 1080p@25 */
+static u32 AWGi_1080p_25[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000DE2, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000F51, 0x00000F51,
+ 0x00000F52, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_25 ARRAY_SIZE(AWGi_1080p_25)
+
+/* 1080p@24 */
+static u32 AWGi_1080p_24[] = {
+ 0x00000971, 0x00000C2A, 0x0000013B, 0x00000C56,
+ 0x00000104, 0x00000FDC, 0x00000FDD, 0x0000013B,
+ 0x00000E50, 0x00000104, 0x00001804, 0x00000971,
+ 0x00000C2A, 0x0000003B, 0x00000F76, 0x00000F76,
+ 0x00000F76, 0x00000104, 0x00001A2F, 0x00001C4B,
+ 0x00001C52
+};
+
+#define NN_1080p_24 ARRAY_SIZE(AWGi_1080p_24)
+
+/* 720x480p@60 */
+static u32 AWGi_720x480p_60[] = {
+ 0x00000904, 0x00000F18, 0x0000013B, 0x00001805,
+ 0x00000904, 0x00000C3D, 0x0000003B, 0x00001A06
+};
+
+#define NN_720x480p_60 ARRAY_SIZE(AWGi_720x480p_60)
+
+/* Video mode category */
+enum sti_hda_vid_cat {
+ VID_SD,
+ VID_ED,
+ VID_HD_74M,
+ VID_HD_148M
+};
+
+struct sti_hda_video_config {
+ struct drm_display_mode mode;
+ u32 *awg_instr;
+ int nb_instr;
+ enum sti_hda_vid_cat vid_cat;
+};
+
+/* HD analog supported modes
+ * Interlaced modes may be added when supported by the whole display chain
+ */
+static const struct sti_hda_video_config hda_supported_modes[] = {
+ /* 1080p30 74.250MHz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
+ /* 1080p30 74.176MHz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_30, NN_1080p_30, VID_HD_74M},
+ /* 1080p24 74.250MHz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
+ /* 1080p24 74.176MHz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74176, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_24, NN_1080p_24, VID_HD_74M},
+ /* 1080p25 74.250MHz */
+ {{DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_1080p_25, NN_1080p_25, VID_HD_74M},
+ /* 720p60 74.250MHz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_60, NN_720p_60, VID_HD_74M},
+ /* 720p60 74.176MHz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74176, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_60, NN_720p_60, VID_HD_74M},
+ /* 720p50 74.250MHz */
+ {{DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC)},
+ AWGi_720p_50, NN_720p_50, VID_HD_74M},
+ /* 720x480p60 27.027MHz */
+ {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27027, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ AWGi_720x480p_60, NN_720x480p_60, VID_ED},
+ /* 720x480p60 27.000MHz */
+ {{DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)},
+ AWGi_720x480p_60, NN_720x480p_60, VID_ED}
+};
+
+/**
+ * STI hd analog structure
+ *
+ * @dev: driver device
+ * @drm_dev: pointer to drm device
+ * @mode: current display mode selected
+ * @regs: HD analog register
+ * @video_dacs_ctrl: video DACS control register
+ * @clk_pix: pixel clock for the HD analog path
+ * @clk_hddac: clock for the HD analog DAC
+ * @enabled: true if the HD analog output is enabled, false otherwise
+ */
+struct sti_hda {
+ struct device dev;
+ struct drm_device *drm_dev;
+ struct drm_display_mode mode;
+ void __iomem *regs;
+ void __iomem *video_dacs_ctrl;
+ struct clk *clk_pix;
+ struct clk *clk_hddac;
+ bool enabled;
+};
+
+struct sti_hda_connector {
+ struct drm_connector drm_connector;
+ struct drm_encoder *encoder;
+ struct sti_hda *hda;
+};
+
+#define to_sti_hda_connector(x) \
+ container_of(x, struct sti_hda_connector, drm_connector)
+
+static u32 hda_read(struct sti_hda *hda, int offset)
+{
+ return readl(hda->regs + offset);
+}
+
+static void hda_write(struct sti_hda *hda, u32 val, int offset)
+{
+ writel(val, hda->regs + offset);
+}
+
+/**
+ * Search for a video mode in the supported modes table
+ *
+ * @mode: mode being searched
+ * @idx: index of the found mode
+ *
+ * Return true if mode is found
+ */
+static bool hda_get_mode_idx(struct drm_display_mode mode, int *idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++)
+ if (drm_mode_equal(&hda_supported_modes[i].mode, &mode)) {
+ *idx = i;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * Enable or disable the HD DACs
+ *
+ * @hda: pointer to the HD analog structure
+ * @enable: true to enable the HD DACs, false to disable them
+ */
+static void hda_enable_hd_dacs(struct sti_hda *hda, bool enable)
+{
+ u32 mask;
+
+ if (hda->video_dacs_ctrl) {
+ u32 val;
+
+ switch ((u32)hda->video_dacs_ctrl & VIDEO_DACS_CONTROL_MASK) {
+ case VIDEO_DACS_CONTROL_SYSCFG2535:
+ mask = DAC_CFG_HD_OFF_MASK;
+ break;
+ case VIDEO_DACS_CONTROL_SYSCFG5072:
+ mask = DAC_CFG_HD_HZUVW_OFF_MASK;
+ break;
+ default:
+ DRM_INFO("Video DACS control register not supported!");
+ return;
+ }
+
+ val = readl(hda->video_dacs_ctrl);
+ if (enable)
+ val &= ~mask;
+ else
+ val |= mask;
+
+ writel(val, hda->video_dacs_ctrl);
+ }
+}
+
+/**
+ * Configure the AWG, writing the instructions
+ *
+ * @hda: pointer to the HD analog structure
+ * @awg_instr: pointer to the AWG instruction table
+ * @nb: number of AWG instructions
+ */
+static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb)
+{
+ unsigned int i;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for (i = 0; i < nb; i++)
+ hda_write(hda, awg_instr[i], HDA_SYNC_AWGI + i * 4);
+ for (i = nb; i < AWG_MAX_INST; i++)
+ hda_write(hda, 0, HDA_SYNC_AWGI + i * 4);
+}
+
+static void sti_hda_disable(struct drm_bridge *bridge)
+{
+ struct sti_hda *hda = bridge->driver_private;
+ u32 val;
+
+ if (!hda->enabled)
+ return;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable HD DAC and AWG */
+ val = hda_read(hda, HDA_ANA_CFG);
+ val &= ~CFG_AWG_ASYNC_EN;
+ hda_write(hda, val, HDA_ANA_CFG);
+ hda_write(hda, 0, HDA_ANA_ANC_CTRL);
+
+ hda_enable_hd_dacs(hda, false);
+
+ /* Disable/unprepare hda clock */
+ clk_disable_unprepare(hda->clk_hddac);
+ clk_disable_unprepare(hda->clk_pix);
+
+ hda->enabled = false;
+}
+
+static void sti_hda_pre_enable(struct drm_bridge *bridge)
+{
+ struct sti_hda *hda = bridge->driver_private;
+ u32 val, i;
+ int mode_idx;
+ u32 src_filter_y, src_filter_c;
+ u32 *coef_y, *coef_c;
+ u32 filter_mode;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hda->enabled)
+ return;
+
+ /* Prepare/enable clocks */
+ if (clk_prepare_enable(hda->clk_pix))
+ DRM_ERROR("Failed to prepare/enable hda_pix clk\n");
+ if (clk_prepare_enable(hda->clk_hddac))
+ DRM_ERROR("Failed to prepare/enable hda_hddac clk\n");
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ switch (hda_supported_modes[mode_idx].vid_cat) {
+ case VID_HD_148M:
+ DRM_ERROR("Beyond HD analog capabilities\n");
+ return;
+ case VID_HD_74M:
+ /* HD use alternate 2x filter */
+ filter_mode = CFG_AWG_FLTR_MODE_HD;
+ src_filter_y = HDA_ANA_SRC_Y_CFG_ALT_2X;
+ src_filter_c = HDA_ANA_SRC_C_CFG_ALT_2X;
+ coef_y = coef_y_alt_2x;
+ coef_c = coef_c_alt_2x;
+ break;
+ case VID_ED:
+ /* ED uses 4x filter */
+ filter_mode = CFG_AWG_FLTR_MODE_ED;
+ src_filter_y = HDA_ANA_SRC_Y_CFG_4X;
+ src_filter_c = HDA_ANA_SRC_C_CFG_4X;
+ coef_y = coef_yc_4x;
+ coef_c = coef_yc_4x;
+ break;
+ case VID_SD:
+ DRM_ERROR("Not supported\n");
+ return;
+ default:
+ DRM_ERROR("Undefined resolution\n");
+ return;
+ }
+ DRM_DEBUG_DRIVER("Using HDA mode #%d\n", mode_idx);
+
+ /* Enable HD Video DACs */
+ hda_enable_hd_dacs(hda, true);
+
+ /* Configure scaler */
+ hda_write(hda, SCALE_CTRL_Y_DFLT, HDA_ANA_SCALE_CTRL_Y);
+ hda_write(hda, SCALE_CTRL_CB_DFLT, HDA_ANA_SCALE_CTRL_CB);
+ hda_write(hda, SCALE_CTRL_CR_DFLT, HDA_ANA_SCALE_CTRL_CR);
+
+ /* Configure sampler */
+ hda_write(hda, src_filter_y, HDA_ANA_SRC_Y_CFG);
+ hda_write(hda, src_filter_c, HDA_ANA_SRC_C_CFG);
+ for (i = 0; i < SAMPLER_COEF_NB; i++) {
+ hda_write(hda, coef_y[i], HDA_COEFF_Y_PH1_TAP123 + i * 4);
+ hda_write(hda, coef_c[i], HDA_COEFF_C_PH1_TAP123 + i * 4);
+ }
+
+ /* Configure main HDFormatter */
+ val = 0;
+ val |= (hda->mode.flags & DRM_MODE_FLAG_INTERLACE) ?
+ 0 : CFG_AWG_ASYNC_VSYNC_MTD;
+ val |= (CFG_PBPR_SYNC_OFF_VAL << CFG_PBPR_SYNC_OFF_SHIFT);
+ val |= filter_mode;
+ hda_write(hda, val, HDA_ANA_CFG);
+
+ /* Configure AWG */
+ sti_hda_configure_awg(hda, hda_supported_modes[mode_idx].awg_instr,
+ hda_supported_modes[mode_idx].nb_instr);
+
+ /* Enable AWG */
+ val = hda_read(hda, HDA_ANA_CFG);
+ val |= CFG_AWG_ASYNC_EN;
+ hda_write(hda, val, HDA_ANA_CFG);
+
+ hda->enabled = true;
+}
+
+static void sti_hda_set_mode(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sti_hda *hda = bridge->driver_private;
+ int mode_idx;
+ int hddac_rate;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ memcpy(&hda->mode, mode, sizeof(struct drm_display_mode));
+
+ if (!hda_get_mode_idx(hda->mode, &mode_idx)) {
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ switch (hda_supported_modes[mode_idx].vid_cat) {
+ case VID_HD_74M:
+ /* HD use alternate 2x filter */
+ hddac_rate = mode->clock * 1000 * 2;
+ break;
+ case VID_ED:
+ /* ED uses 4x filter */
+ hddac_rate = mode->clock * 1000 * 4;
+ break;
+ default:
+ DRM_ERROR("Undefined mode\n");
+ return;
+ }
+
+ /* HD DAC clock = 148.5 MHz (74.25 MHz x 2) or 108 MHz (27 MHz x 4) */
+ ret = clk_set_rate(hda->clk_hddac, hddac_rate);
+ if (ret < 0)
+ DRM_ERROR("Cannot set rate (%dHz) for hda_hddac clk\n",
+ hddac_rate);
+
+ /* HDformatter clock = compositor clock */
+ ret = clk_set_rate(hda->clk_pix, mode->clock * 1000);
+ if (ret < 0)
+ DRM_ERROR("Cannot set rate (%dHz) for hda_pix clk\n",
+ mode->clock * 1000);
+}
+
+static void sti_hda_bridge_nope(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static void sti_hda_bridge_destroy(struct drm_bridge *bridge)
+{
+ drm_bridge_cleanup(bridge);
+ kfree(bridge);
+}
+
+static const struct drm_bridge_funcs sti_hda_bridge_funcs = {
+ .pre_enable = sti_hda_pre_enable,
+ .enable = sti_hda_bridge_nope,
+ .disable = sti_hda_disable,
+ .post_disable = sti_hda_bridge_nope,
+ .mode_set = sti_hda_set_mode,
+ .destroy = sti_hda_bridge_destroy,
+};
+
+static int sti_hda_connector_get_modes(struct drm_connector *connector)
+{
+ unsigned int i;
+ int count = 0;
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ for (i = 0; i < ARRAY_SIZE(hda_supported_modes); i++) {
+ struct drm_display_mode *mode =
+ drm_mode_duplicate(hda->drm_dev,
+ &hda_supported_modes[i].mode);
+ if (!mode)
+ continue;
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ /* the first mode is the preferred mode */
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ count++;
+ }
+
+ drm_mode_sort(&connector->modes);
+
+ return count;
+}
+
+#define CLK_TOLERANCE_HZ 50
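+/*
+ * Example: for a 74.25 MHz pixel clock, the mode_valid check below only
+ * accepts a rounded clk_pix rate between 74249950 Hz and 74250050 Hz.
+ */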
+
+static int sti_hda_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+ int target_max = target + CLK_TOLERANCE_HZ;
+ int result;
+ int idx;
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+ struct sti_hda *hda = hda_connector->hda;
+
+ if (!hda_get_mode_idx(*mode, &idx))
+ return MODE_BAD;
+
+ result = clk_round_rate(hda->clk_pix, target);
+
+ DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
+ target, result);
+
+ if ((result < target_min) || (result > target_max)) {
+ DRM_DEBUG_DRIVER("hda pixclk=%d not supported\n", target);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
+{
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+
+ /* Best encoder is the one associated during connector creation */
+ return hda_connector->encoder;
+}
+
+static struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
+ .get_modes = sti_hda_connector_get_modes,
+ .mode_valid = sti_hda_connector_mode_valid,
+ .best_encoder = sti_hda_best_encoder,
+};
+
+static enum drm_connector_status
+sti_hda_connector_detect(struct drm_connector *connector, bool force)
+{
+ return connector_status_connected;
+}
+
+static void sti_hda_connector_destroy(struct drm_connector *connector)
+{
+ struct sti_hda_connector *hda_connector
+ = to_sti_hda_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(hda_connector);
+}
+
+static struct drm_connector_funcs sti_hda_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = sti_hda_connector_detect,
+ .destroy = sti_hda_connector_destroy,
+};
+
+static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int sti_hda_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hda *hda = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct sti_hda_connector *connector;
+ struct drm_connector *drm_connector;
+ struct drm_bridge *bridge;
+ int err;
+
+ /* Set the drm device handle */
+ hda->drm_dev = drm_dev;
+
+ encoder = sti_hda_find_encoder(drm_dev);
+ if (!encoder)
+ return -EINVAL;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ connector->hda = hda;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver_private = hda;
+ drm_bridge_init(drm_dev, bridge, &sti_hda_bridge_funcs);
+
+ encoder->bridge = bridge;
+ connector->encoder = encoder;
+
+ drm_connector = &connector->drm_connector;
+
+ drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm_dev, drm_connector,
+ &sti_hda_connector_funcs, DRM_MODE_CONNECTOR_Component);
+ drm_connector_helper_add(drm_connector,
+ &sti_hda_connector_helper_funcs);
+
+ err = drm_connector_register(drm_connector);
+ if (err)
+ goto err_connector;
+
+ err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ if (err) {
+ DRM_ERROR("Failed to attach a connector to a encoder\n");
+ goto err_sysfs;
+ }
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(drm_connector);
+err_connector:
+ drm_bridge_cleanup(bridge);
+ drm_connector_cleanup(drm_connector);
+ return err;
+}
+
+static void sti_hda_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hda_ops = {
+ .bind = sti_hda_bind,
+ .unbind = sti_hda_unbind,
+};
+
+static int sti_hda_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_hda *hda;
+ struct resource *res;
+
+ DRM_INFO("%s\n", __func__);
+
+ hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL);
+ if (!hda)
+ return -ENOMEM;
+
+ hda->dev = pdev->dev;
+
+ /* Get resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg");
+ if (!res) {
+ DRM_ERROR("Invalid hda resource\n");
+ return -EINVAL;
+ }
+ hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!hda->regs)
+ return -ENOMEM;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "video-dacs-ctrl");
+ if (res) {
+ hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ if (!hda->video_dacs_ctrl)
+ return -ENOMEM;
+ } else {
+ /* Continue probing even without a video-dacs-ctrl resource */
+ DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n");
+ hda->video_dacs_ctrl = NULL;
+ }
+
+ /* Get clock resources */
+ hda->clk_pix = devm_clk_get(dev, "pix");
+ if (IS_ERR(hda->clk_pix)) {
+ DRM_ERROR("Cannot get hda_pix clock\n");
+ return PTR_ERR(hda->clk_pix);
+ }
+
+ hda->clk_hddac = devm_clk_get(dev, "hddac");
+ if (IS_ERR(hda->clk_hddac)) {
+ DRM_ERROR("Cannot get hda_hddac clock\n");
+ return PTR_ERR(hda->clk_hddac);
+ }
+
+ platform_set_drvdata(pdev, hda);
+
+ return component_add(&pdev->dev, &sti_hda_ops);
+}
+
+static int sti_hda_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hda_ops);
+ return 0;
+}
+
+static const struct of_device_id hda_of_match[] = {
+ { .compatible = "st,stih416-hda", },
+ { .compatible = "st,stih407-hda", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, hda_of_match);
+
+struct platform_driver sti_hda_driver = {
+ .driver = {
+ .name = "sti-hda",
+ .owner = THIS_MODULE,
+ .of_match_table = hda_of_match,
+ },
+ .probe = sti_hda_probe,
+ .remove = sti_hda_remove,
+};
+
+module_platform_driver(sti_hda_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
new file mode 100644
index 000000000000..284e541d970d
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -0,0 +1,810 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/hdmi.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+
+#include "sti_hdmi.h"
+#include "sti_hdmi_tx3g4c28phy.h"
+#include "sti_hdmi_tx3g0c55phy.h"
+#include "sti_vtg.h"
+
+#define HDMI_CFG 0x0000
+#define HDMI_INT_EN 0x0004
+#define HDMI_INT_STA 0x0008
+#define HDMI_INT_CLR 0x000C
+#define HDMI_STA 0x0010
+#define HDMI_ACTIVE_VID_XMIN 0x0100
+#define HDMI_ACTIVE_VID_XMAX 0x0104
+#define HDMI_ACTIVE_VID_YMIN 0x0108
+#define HDMI_ACTIVE_VID_YMAX 0x010C
+#define HDMI_DFLT_CHL0_DAT 0x0110
+#define HDMI_DFLT_CHL1_DAT 0x0114
+#define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_SW_DI_1_HEAD_WORD 0x0210
+#define HDMI_SW_DI_1_PKT_WORD0 0x0214
+#define HDMI_SW_DI_1_PKT_WORD1 0x0218
+#define HDMI_SW_DI_1_PKT_WORD2 0x021C
+#define HDMI_SW_DI_1_PKT_WORD3 0x0220
+#define HDMI_SW_DI_1_PKT_WORD4 0x0224
+#define HDMI_SW_DI_1_PKT_WORD5 0x0228
+#define HDMI_SW_DI_1_PKT_WORD6 0x022C
+#define HDMI_SW_DI_CFG 0x0230
+
+#define HDMI_IFRAME_SLOT_AVI 1
+
+#define XCAT(prefix, x, suffix) prefix ## x ## suffix
+#define HDMI_SW_DI_N_HEAD_WORD(x) XCAT(HDMI_SW_DI_, x, _HEAD_WORD)
+#define HDMI_SW_DI_N_PKT_WORD0(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD0)
+#define HDMI_SW_DI_N_PKT_WORD1(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD1)
+#define HDMI_SW_DI_N_PKT_WORD2(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD2)
+#define HDMI_SW_DI_N_PKT_WORD3(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD3)
+#define HDMI_SW_DI_N_PKT_WORD4(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD4)
+#define HDMI_SW_DI_N_PKT_WORD5(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD5)
+#define HDMI_SW_DI_N_PKT_WORD6(x) XCAT(HDMI_SW_DI_, x, _PKT_WORD6)
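+/*
+ * The extra macro level makes the slot argument expand before pasting, so
+ * e.g. HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI) becomes
+ * HDMI_SW_DI_1_HEAD_WORD (0x0210).
+ */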
+
+#define HDMI_IFRAME_DISABLED 0x0
+#define HDMI_IFRAME_SINGLE_SHOT 0x1
+#define HDMI_IFRAME_FIELD 0x2
+#define HDMI_IFRAME_FRAME 0x3
+#define HDMI_IFRAME_MASK 0x3
+#define HDMI_IFRAME_CFG_DI_N(x, n) ((x) << ((n-1)*4)) /* n from 1 to 6 */
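+/*
+ * e.g. HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, 1) == 0x2: bits [3:0] of
+ * HDMI_SW_DI_CFG control slot 1, and value 0x2 makes that slot send its
+ * infoframe once per field.
+ */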
+
+#define HDMI_CFG_DEVICE_EN BIT(0)
+#define HDMI_CFG_HDMI_NOT_DVI BIT(1)
+#define HDMI_CFG_HDCP_EN BIT(2)
+#define HDMI_CFG_ESS_NOT_OESS BIT(3)
+#define HDMI_CFG_H_SYNC_POL_NEG BIT(4)
+#define HDMI_CFG_SINK_TERM_DET_EN BIT(5)
+#define HDMI_CFG_V_SYNC_POL_NEG BIT(6)
+#define HDMI_CFG_422_EN BIT(8)
+#define HDMI_CFG_FIFO_OVERRUN_CLR BIT(12)
+#define HDMI_CFG_FIFO_UNDERRUN_CLR BIT(13)
+#define HDMI_CFG_SW_RST_EN BIT(31)
+
+#define HDMI_INT_GLOBAL BIT(0)
+#define HDMI_INT_SW_RST BIT(1)
+#define HDMI_INT_PIX_CAP BIT(3)
+#define HDMI_INT_HOT_PLUG BIT(4)
+#define HDMI_INT_DLL_LCK BIT(5)
+#define HDMI_INT_NEW_FRAME BIT(6)
+#define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_SINK_TERM_PRESENT BIT(11)
+
+#define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_DLL_LCK \
+ | HDMI_INT_HOT_PLUG \
+ | HDMI_INT_GLOBAL)
+
+#define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+ | HDMI_INT_GENCTRL_PKT \
+ | HDMI_INT_NEW_FRAME \
+ | HDMI_INT_DLL_LCK \
+ | HDMI_INT_HOT_PLUG \
+ | HDMI_INT_PIX_CAP \
+ | HDMI_INT_SW_RST \
+ | HDMI_INT_GLOBAL)
+
+#define HDMI_STA_SW_RST BIT(1)
+
+struct sti_hdmi_connector {
+ struct drm_connector drm_connector;
+ struct drm_encoder *encoder;
+ struct sti_hdmi *hdmi;
+};
+
+#define to_sti_hdmi_connector(x) \
+ container_of(x, struct sti_hdmi_connector, drm_connector)
+
+u32 hdmi_read(struct sti_hdmi *hdmi, int offset)
+{
+ return readl(hdmi->regs + offset);
+}
+
+void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset)
+{
+ writel(val, hdmi->regs + offset);
+}
+
+/**
+ * Threaded HDMI interrupt handler
+ *
+ * @irq: irq number
+ * @arg: pointer to the sti_hdmi structure
+ */
+static irqreturn_t hdmi_irq_thread(int irq, void *arg)
+{
+ struct sti_hdmi *hdmi = arg;
+
+ /* Hot plug/unplug IRQ */
+ if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
+ /* read gpio to get the status */
+ hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ if (hdmi->drm_dev)
+ drm_helper_hpd_irq_event(hdmi->drm_dev);
+ }
+
+ /*
+ * Sw reset and PLL lock are exclusive so we can use the same
+ * event to signal them
+ */
+ if (hdmi->irq_status & (HDMI_INT_SW_RST | HDMI_INT_DLL_LCK)) {
+ hdmi->event_received = true;
+ wake_up_interruptible(&hdmi->wait_event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * HDMI interrupt handler
+ *
+ * @irq: irq number
+ * @arg: pointer to the sti_hdmi structure
+ */
+static irqreturn_t hdmi_irq(int irq, void *arg)
+{
+ struct sti_hdmi *hdmi = arg;
+
+ /* read interrupt status */
+ hdmi->irq_status = hdmi_read(hdmi, HDMI_INT_STA);
+
+ /* clear interrupt status */
+ hdmi_write(hdmi, hdmi->irq_status, HDMI_INT_CLR);
+
+ /* force sync bus write */
+ hdmi_read(hdmi, HDMI_INT_STA);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/**
+ * Set the hdmi active area depending on the selected drm display mode
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void hdmi_active_area(struct sti_hdmi *hdmi)
+{
+ u32 xmin, xmax;
+ u32 ymin, ymax;
+
+ xmin = sti_vtg_get_pixel_number(hdmi->mode, 0);
+ xmax = sti_vtg_get_pixel_number(hdmi->mode, hdmi->mode.hdisplay - 1);
+ ymin = sti_vtg_get_line_number(hdmi->mode, 0);
+ ymax = sti_vtg_get_line_number(hdmi->mode, hdmi->mode.vdisplay - 1);
+
+ hdmi_write(hdmi, xmin, HDMI_ACTIVE_VID_XMIN);
+ hdmi_write(hdmi, xmax, HDMI_ACTIVE_VID_XMAX);
+ hdmi_write(hdmi, ymin, HDMI_ACTIVE_VID_YMIN);
+ hdmi_write(hdmi, ymax, HDMI_ACTIVE_VID_YMAX);
+}
+
+/**
+ * Overall hdmi configuration
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void hdmi_config(struct sti_hdmi *hdmi)
+{
+ u32 conf;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Clear overrun and underrun fifo */
+ conf = HDMI_CFG_FIFO_OVERRUN_CLR | HDMI_CFG_FIFO_UNDERRUN_CLR;
+
+ /* Enable HDMI mode not DVI */
+ conf |= HDMI_CFG_HDMI_NOT_DVI | HDMI_CFG_ESS_NOT_OESS;
+
+ /* Enable sink term detection */
+ conf |= HDMI_CFG_SINK_TERM_DET_EN;
+
+ /* Set Hsync polarity */
+ if (hdmi->mode.flags & DRM_MODE_FLAG_NHSYNC) {
+ DRM_DEBUG_DRIVER("H Sync Negative\n");
+ conf |= HDMI_CFG_H_SYNC_POL_NEG;
+ }
+
+ /* Set Vsync polarity */
+ if (hdmi->mode.flags & DRM_MODE_FLAG_NVSYNC) {
+ DRM_DEBUG_DRIVER("V Sync Negative\n");
+ conf |= HDMI_CFG_V_SYNC_POL_NEG;
+ }
+
+ /* Enable HDMI */
+ conf |= HDMI_CFG_DEVICE_EN;
+
+ hdmi_write(hdmi, conf, HDMI_CFG);
+}
+
+/**
+ * Prepare and configure the AVI infoframe
+ *
+ * The AVI infoframe is transmitted at least once per two video fields and
+ * contains information about the HDMI transmission mode, such as color
+ * space and colorimetry.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return a negative value if an error occurs
+ */
+static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
+{
+ struct drm_display_mode *mode = &hdmi->mode;
+ struct hdmi_avi_infoframe infoframe;
+ u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
+ u8 *frame = buffer + HDMI_INFOFRAME_HEADER_SIZE;
+ u32 val;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe, mode);
+ if (ret < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %d\n", ret);
+ return ret;
+ }
+
+ /* fixed infoframe configuration not linked to the mode */
+ infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ infoframe.colorimetry = HDMI_COLORIMETRY_NONE;
+
+ ret = hdmi_avi_infoframe_pack(&infoframe, buffer, sizeof(buffer));
+ if (ret < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
+ return ret;
+ }
+
+ /* Disable transmission slot for AVI infoframe */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, HDMI_IFRAME_SLOT_AVI);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ /* Infoframe header */
+ val = buffer[0x0];
+ val |= buffer[0x1] << 8;
+ val |= buffer[0x2] << 16;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
+
+ /* Infoframe packet bytes */
+ val = frame[0x0];
+ val |= frame[0x1] << 8;
+ val |= frame[0x2] << 16;
+ val |= frame[0x3] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0x4];
+ val |= frame[0x5] << 8;
+ val |= frame[0x6] << 16;
+ val |= frame[0x7] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
+
+ val = frame[0x8];
+ val |= frame[0x9] << 8;
+ val |= frame[0xA] << 16;
+ val |= frame[0xB] << 24;
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
+
+ /* Only 13 payload bytes (frame[0x0..0xC]) exist for an AVI infoframe */
+ val = frame[0xC];
+ hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
+
+ /*
+ * Enable transmission slot for AVI infoframe.
+ * According to the hdmi specification, the AVI infoframe should be
+ * transmitted at least once per two video fields.
+ */
+ val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+ val |= HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_FIELD, HDMI_IFRAME_SLOT_AVI);
+ hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
+
+ return 0;
+}
+
+/**
+ * Software reset of the hdmi subsystem
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+#define HDMI_TIMEOUT_SWRESET 100 /* milliseconds */
+static void hdmi_swreset(struct sti_hdmi *hdmi)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Enable hdmi_audio clock only during hdmi reset */
+ if (clk_prepare_enable(hdmi->clk_audio))
+ DRM_INFO("Failed to prepare/enable hdmi_audio clk\n");
+
+ /* Sw reset */
+ hdmi->event_received = false;
+
+ val = hdmi_read(hdmi, HDMI_CFG);
+ val |= HDMI_CFG_SW_RST_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+ /* Wait reset completed */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received,
+ msecs_to_jiffies(HDMI_TIMEOUT_SWRESET));
+
+ /*
+ * HDMI_STA_SW_RST bit is set to '1' when SW_RST bit in HDMI_CFG is
+ * set to '1' and clk_audio is running.
+ */
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_SW_RST) == 0)
+ DRM_DEBUG_DRIVER("Warning: HDMI sw reset timeout occurs\n");
+
+ val = hdmi_read(hdmi, HDMI_CFG);
+ val &= ~HDMI_CFG_SW_RST_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+ /* Disable hdmi_audio clock. Not used anymore for drm purpose */
+ clk_disable_unprepare(hdmi->clk_audio);
+}
+
+static void sti_hdmi_disable(struct drm_bridge *bridge)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+ u32 val;
+
+ if (!hdmi->enabled)
+ return;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Disable HDMI */
+ val = hdmi_read(hdmi, HDMI_CFG);
+ val &= ~HDMI_CFG_DEVICE_EN;
+ hdmi_write(hdmi, val, HDMI_CFG);
+
+ hdmi_write(hdmi, 0xffffffff, HDMI_INT_CLR);
+
+ /* Stop the phy */
+ hdmi->phy_ops->stop(hdmi);
+
+ /* Set the default channel data to be a dark red */
+ hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL0_DAT);
+ hdmi_write(hdmi, 0x0000, HDMI_DFLT_CHL1_DAT);
+ hdmi_write(hdmi, 0x0060, HDMI_DFLT_CHL2_DAT);
+
+ /* Disable/unprepare hdmi clock */
+ clk_disable_unprepare(hdmi->clk_phy);
+ clk_disable_unprepare(hdmi->clk_tmds);
+ clk_disable_unprepare(hdmi->clk_pix);
+
+ hdmi->enabled = false;
+}
+
+static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hdmi->enabled)
+ return;
+
+ /* Prepare/enable clocks */
+ if (clk_prepare_enable(hdmi->clk_pix))
+ DRM_ERROR("Failed to prepare/enable hdmi_pix clk\n");
+ if (clk_prepare_enable(hdmi->clk_tmds))
+ DRM_ERROR("Failed to prepare/enable hdmi_tmds clk\n");
+ if (clk_prepare_enable(hdmi->clk_phy))
+ DRM_ERROR("Failed to prepare/enable hdmi_rejec_pll clk\n");
+
+ hdmi->enabled = true;
+
+ /* Program hdmi serializer and start phy */
+ if (!hdmi->phy_ops->start(hdmi)) {
+ DRM_ERROR("Unable to start hdmi phy\n");
+ return;
+ }
+
+ /* Program hdmi active area */
+ hdmi_active_area(hdmi);
+
+ /* Enable working interrupts */
+ hdmi_write(hdmi, HDMI_WORKING_INT, HDMI_INT_EN);
+
+ /* Program hdmi config */
+ hdmi_config(hdmi);
+
+ /* Program AVI infoframe */
+ if (hdmi_avi_infoframe_config(hdmi))
+ DRM_ERROR("Unable to configure AVI infoframe\n");
+
+ /* Sw reset */
+ hdmi_swreset(hdmi);
+}
+
+static void sti_hdmi_set_mode(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sti_hdmi *hdmi = bridge->driver_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Copy the drm display mode into the local structure */
+ memcpy(&hdmi->mode, mode, sizeof(struct drm_display_mode));
+
+ /* Update the pixel clock rate according to the selected mode */
+ ret = clk_set_rate(hdmi->clk_pix, mode->clock * 1000);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for hdmi_pix clk\n",
+ mode->clock * 1000);
+ return;
+ }
+ ret = clk_set_rate(hdmi->clk_phy, mode->clock * 1000);
+ if (ret < 0) {
+ DRM_ERROR("Cannot set rate (%dHz) for hdmi_rejection_pll clk\n",
+ mode->clock * 1000);
+ return;
+ }
+}
+
+static void sti_hdmi_bridge_nope(struct drm_bridge *bridge)
+{
+ /* do nothing */
+}
+
+static void sti_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+ drm_bridge_cleanup(bridge);
+ kfree(bridge);
+}
+
+static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
+ .pre_enable = sti_hdmi_pre_enable,
+ .enable = sti_hdmi_bridge_nope,
+ .disable = sti_hdmi_disable,
+ .post_disable = sti_hdmi_bridge_nope,
+ .mode_set = sti_hdmi_set_mode,
+ .destroy = sti_hdmi_bridge_destroy,
+};
+
+static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct i2c_adapter *i2c_adap;
+ struct edid *edid;
+ int count;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ i2c_adap = i2c_get_adapter(1);
+ if (!i2c_adap)
+ goto fail;
+
+ edid = drm_get_edid(connector, i2c_adap);
+ i2c_put_adapter(i2c_adap);
+ if (!edid)
+ goto fail;
+
+ count = drm_add_edid_modes(connector, edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ kfree(edid);
+ return count;
+
+fail:
+ DRM_ERROR("Can not read HDMI EDID\n");
+ return 0;
+}
+
+#define CLK_TOLERANCE_HZ 50
+
+static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int target = mode->clock * 1000;
+ int target_min = target - CLK_TOLERANCE_HZ;
+ int target_max = target + CLK_TOLERANCE_HZ;
+ int result;
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ result = clk_round_rate(hdmi->clk_pix, target);
+
+ DRM_DEBUG_DRIVER("target rate = %d => available rate = %d\n",
+ target, result);
+
+ if ((result < target_min) || (result > target_max)) {
+ DRM_DEBUG_DRIVER("hdmi pixclk=%d not supported\n", target);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+static struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+
+ /* Best encoder is the one associated during connector creation */
+ return hdmi_connector->encoder;
+}
+
+static struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
+ .get_modes = sti_hdmi_connector_get_modes,
+ .mode_valid = sti_hdmi_connector_mode_valid,
+ .best_encoder = sti_hdmi_best_encoder,
+};
+
+/* get detection status of display device */
+static enum drm_connector_status
+sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (hdmi->hpd) {
+ DRM_DEBUG_DRIVER("hdmi cable connected\n");
+ return connector_status_connected;
+ }
+
+ DRM_DEBUG_DRIVER("hdmi cable disconnected\n");
+ return connector_status_disconnected;
+}
+
+static void sti_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(hdmi_connector);
+}
+
+static struct drm_connector_funcs sti_hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = sti_hdmi_connector_detect,
+ .destroy = sti_hdmi_connector_destroy,
+};
+
+static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
+ return encoder;
+ }
+
+ return NULL;
+}
+
+static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct drm_encoder *encoder;
+ struct sti_hdmi_connector *connector;
+ struct drm_connector *drm_connector;
+ struct drm_bridge *bridge;
+ struct i2c_adapter *i2c_adap;
+ int err;
+
+ /* Check that the DDC adapter exists; get_modes takes its own reference */
+ i2c_adap = i2c_get_adapter(1);
+ if (!i2c_adap)
+ return -EPROBE_DEFER;
+ i2c_put_adapter(i2c_adap);
+
+ /* Set the drm device handle */
+ hdmi->drm_dev = drm_dev;
+
+ encoder = sti_hdmi_find_encoder(drm_dev);
+ if (!encoder)
+ return -EINVAL;
+
+ connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return -ENOMEM;
+
+ connector->hdmi = hdmi;
+
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->driver_private = hdmi;
+ drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
+
+ encoder->bridge = bridge;
+ connector->encoder = encoder;
+
+ drm_connector = &connector->drm_connector;
+
+ drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_init(drm_dev, drm_connector,
+ &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(drm_connector,
+ &sti_hdmi_connector_helper_funcs);
+
+ err = drm_connector_register(drm_connector);
+ if (err)
+ goto err_connector;
+
+ err = drm_mode_connector_attach_encoder(drm_connector, encoder);
+ if (err) {
+ DRM_ERROR("Failed to attach a connector to a encoder\n");
+ goto err_sysfs;
+ }
+
+ /* Enable default interrupts */
+ hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
+
+ return 0;
+
+err_sysfs:
+ drm_connector_unregister(drm_connector);
+err_connector:
+ drm_bridge_cleanup(bridge);
+ drm_connector_cleanup(drm_connector);
+ return err;
+}
+
+static void sti_hdmi_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hdmi_ops = {
+ .bind = sti_hdmi_bind,
+ .unbind = sti_hdmi_unbind,
+};
+
+static const struct of_device_id hdmi_of_match[] = {
+ {
+ .compatible = "st,stih416-hdmi",
+ .data = &tx3g0c55phy_ops,
+ }, {
+ .compatible = "st,stih407-hdmi",
+ .data = &tx3g4c28phy_ops,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, hdmi_of_match);
+
+static int sti_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sti_hdmi *hdmi;
+ struct device_node *np = dev->of_node;
+ struct resource *res;
+ int ret;
+
+ DRM_INFO("%s\n", __func__);
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = pdev->dev;
+
+ /* Get resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg");
+ if (!res) {
+ DRM_ERROR("Invalid hdmi resource\n");
+ return -EINVAL;
+ }
+ hdmi->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!hdmi->regs)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(np, "st,stih416-hdmi")) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "syscfg");
+ if (!res) {
+ DRM_ERROR("Invalid syscfg resource\n");
+ return -EINVAL;
+ }
+ hdmi->syscfg = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ if (!hdmi->syscfg)
+ return -ENOMEM;
+ }
+
+ hdmi->phy_ops = (struct hdmi_phy_ops *)
+ of_match_node(hdmi_of_match, np)->data;
+
+ /* Get clock resources */
+ hdmi->clk_pix = devm_clk_get(dev, "pix");
+ if (IS_ERR(hdmi->clk_pix)) {
+ DRM_ERROR("Cannot get hdmi_pix clock\n");
+ return PTR_ERR(hdmi->clk_pix);
+ }
+
+ hdmi->clk_tmds = devm_clk_get(dev, "tmds");
+ if (IS_ERR(hdmi->clk_tmds)) {
+ DRM_ERROR("Cannot get hdmi_tmds clock\n");
+ return PTR_ERR(hdmi->clk_tmds);
+ }
+
+ hdmi->clk_phy = devm_clk_get(dev, "phy");
+ if (IS_ERR(hdmi->clk_phy)) {
+ DRM_ERROR("Cannot get hdmi_phy clock\n");
+ return PTR_ERR(hdmi->clk_phy);
+ }
+
+ hdmi->clk_audio = devm_clk_get(dev, "audio");
+ if (IS_ERR(hdmi->clk_audio)) {
+ DRM_ERROR("Cannot get hdmi_audio clock\n");
+ return PTR_ERR(hdmi->clk_audio);
+ }
+
+ hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0);
+ if (hdmi->hpd_gpio < 0) {
+ DRM_ERROR("Failed to get hdmi hpd-gpio\n");
+ return -EIO;
+ }
+
+ hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+
+ init_waitqueue_head(&hdmi->wait_event);
+
+ hdmi->irq = platform_get_irq_byname(pdev, "irq");
+ if (hdmi->irq < 0) {
+ DRM_ERROR("Failed to get HDMI interrupt\n");
+ return hdmi->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, hdmi->irq, hdmi_irq,
+ hdmi_irq_thread, IRQF_ONESHOT, dev_name(dev), hdmi);
+ if (ret) {
+ DRM_ERROR("Failed to register HDMI interrupt\n");
+ return ret;
+ }
+
+ hdmi->reset = devm_reset_control_get(dev, "hdmi");
+ /* Take hdmi out of reset */
+ if (!IS_ERR(hdmi->reset))
+ reset_control_deassert(hdmi->reset);
+
+ platform_set_drvdata(pdev, hdmi);
+
+ return component_add(&pdev->dev, &sti_hdmi_ops);
+}
+
+static int sti_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hdmi_ops);
+ return 0;
+}
+
+struct platform_driver sti_hdmi_driver = {
+ .driver = {
+ .name = "sti-hdmi",
+ .owner = THIS_MODULE,
+ .of_match_table = hdmi_of_match,
+ },
+ .probe = sti_hdmi_probe,
+ .remove = sti_hdmi_remove,
+};
+
+module_platform_driver(sti_hdmi_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
new file mode 100644
index 000000000000..61bec6557ceb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_H_
+#define _STI_HDMI_H_
+
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+#define HDMI_STA 0x0010
+#define HDMI_STA_DLL_LCK BIT(5)
+
+struct sti_hdmi;
+
+struct hdmi_phy_ops {
+ bool (*start)(struct sti_hdmi *hdmi);
+ void (*stop)(struct sti_hdmi *hdmi);
+};
+
+/**
+ * STI hdmi structure
+ *
+ * @dev: driver device
+ * @drm_dev: pointer to drm device
+ * @mode: current display mode selected
+ * @regs: hdmi register
+ * @syscfg: syscfg register for pll rejection configuration
+ * @clk_pix: hdmi pixel clock
+ * @clk_tmds: hdmi tmds clock
+ * @clk_phy: hdmi phy clock
+ * @clk_audio: hdmi audio clock
+ * @irq: hdmi interrupt number
+ * @irq_status: interrupt status register
+ * @phy_ops: phy start/stop operations
+ * @enabled: true if hdmi is enabled else false
+ * @hpd_gpio: hdmi hot plug detect gpio number
+ * @hpd: hot plug detect status
+ * @wait_event: wait event
+ * @event_received: wait event status
+ * @reset: reset control of the hdmi phy
+ */
+struct sti_hdmi {
+ struct device dev;
+ struct drm_device *drm_dev;
+ struct drm_display_mode mode;
+ void __iomem *regs;
+ void __iomem *syscfg;
+ struct clk *clk_pix;
+ struct clk *clk_tmds;
+ struct clk *clk_phy;
+ struct clk *clk_audio;
+ int irq;
+ u32 irq_status;
+ struct hdmi_phy_ops *phy_ops;
+ bool enabled;
+ int hpd_gpio;
+ bool hpd;
+ wait_queue_head_t wait_event;
+ bool event_received;
+ struct reset_control *reset;
+};
+
+u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
+void hdmi_write(struct sti_hdmi *hdmi, u32 val, int offset);
+
+/**
+ * hdmi phy config structure
+ *
+ * A pointer to an array of these structures is passed to a TMDS (HDMI) output
+ * via the control interface to provide board and SoC specific
+ * configurations of the HDMI PHY. Each entry in the array specifies a hardware
+ * specific configuration for a given TMDS clock frequency range.
+ *
+ * @min_tmds_freq: Lower bound of TMDS clock frequency this entry applies to
+ * @max_tmds_freq: Upper bound of TMDS clock frequency this entry applies to
+ * @config: SoC specific register configuration
+ */
+struct hdmi_phy_config {
+ u32 min_tmds_freq;
+ u32 max_tmds_freq;
+ u32 config[4];
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
new file mode 100644
index 000000000000..49ae8e44b285
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_hdmi_tx3g0c55phy.h"
+
+#define HDMI_SRZ_PLL_CFG 0x0504
+#define HDMI_SRZ_TAP_1 0x0508
+#define HDMI_SRZ_TAP_2 0x050C
+#define HDMI_SRZ_TAP_3 0x0510
+#define HDMI_SRZ_CTRL 0x0514
+
+#define HDMI_SRZ_PLL_CFG_POWER_DOWN BIT(0)
+#define HDMI_SRZ_PLL_CFG_VCOR_SHIFT 1
+#define HDMI_SRZ_PLL_CFG_VCOR_425MHZ 0
+#define HDMI_SRZ_PLL_CFG_VCOR_850MHZ 1
+#define HDMI_SRZ_PLL_CFG_VCOR_1700MHZ 2
+#define HDMI_SRZ_PLL_CFG_VCOR_3000MHZ 3
+#define HDMI_SRZ_PLL_CFG_VCOR_MASK 3
+#define HDMI_SRZ_PLL_CFG_VCOR(x) ((x) << HDMI_SRZ_PLL_CFG_VCOR_SHIFT)
+#define HDMI_SRZ_PLL_CFG_NDIV_SHIFT 8
+#define HDMI_SRZ_PLL_CFG_NDIV_MASK (0x1F << HDMI_SRZ_PLL_CFG_NDIV_SHIFT)
+#define HDMI_SRZ_PLL_CFG_MODE_SHIFT 16
+#define HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ 0x1
+#define HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ 0x4
+#define HDMI_SRZ_PLL_CFG_MODE_27_MHZ 0x5
+#define HDMI_SRZ_PLL_CFG_MODE_33_75_MHZ 0x6
+#define HDMI_SRZ_PLL_CFG_MODE_40_5_MHZ 0x7
+#define HDMI_SRZ_PLL_CFG_MODE_54_MHZ 0x8
+#define HDMI_SRZ_PLL_CFG_MODE_67_5_MHZ 0x9
+#define HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ 0xA
+#define HDMI_SRZ_PLL_CFG_MODE_81_MHZ 0xB
+#define HDMI_SRZ_PLL_CFG_MODE_82_5_MHZ 0xC
+#define HDMI_SRZ_PLL_CFG_MODE_108_MHZ 0xD
+#define HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ 0xE
+#define HDMI_SRZ_PLL_CFG_MODE_165_MHZ 0xF
+#define HDMI_SRZ_PLL_CFG_MODE_MASK 0xF
+#define HDMI_SRZ_PLL_CFG_MODE(x) ((x) << HDMI_SRZ_PLL_CFG_MODE_SHIFT)
+
+#define HDMI_SRZ_CTRL_POWER_DOWN BIT(0)
+#define HDMI_SRZ_CTRL_EXTERNAL_DATA_EN BIT(1)
+
+/* sysconf registers */
+#define HDMI_REJECTION_PLL_CONFIGURATION 0x0858 /* SYSTEM_CONFIG2534 */
+#define HDMI_REJECTION_PLL_STATUS 0x0948 /* SYSTEM_CONFIG2594 */
+
+#define REJECTION_PLL_HDMI_ENABLE_SHIFT 0
+#define REJECTION_PLL_HDMI_ENABLE_MASK (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT)
+#define REJECTION_PLL_HDMI_PDIV_SHIFT 24
+#define REJECTION_PLL_HDMI_PDIV_MASK (0x7 << REJECTION_PLL_HDMI_PDIV_SHIFT)
+#define REJECTION_PLL_HDMI_NDIV_SHIFT 16
+#define REJECTION_PLL_HDMI_NDIV_MASK (0xFF << REJECTION_PLL_HDMI_NDIV_SHIFT)
+#define REJECTION_PLL_HDMI_MDIV_SHIFT 8
+#define REJECTION_PLL_HDMI_MDIV_MASK (0xFF << REJECTION_PLL_HDMI_MDIV_SHIFT)
+
+#define REJECTION_PLL_HDMI_REJ_PLL_LOCK BIT(0)
+
+#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
+
+/**
+ * pll mode structure
+ *
+ * A pointer to an array of these structures is passed to a TMDS (HDMI) output
+ * via the control interface to provide board and SoC specific
+ * configurations of the HDMI PHY. Each entry in the array specifies a hardware
+ * specific configuration for a given TMDS clock frequency range. The lookup
+ * loop iterates over exactly NB_PLL_MODE entries, so no terminating entry
+ * is needed.
+ *
+ * @min: Lower bound of TMDS clock frequency this entry applies to
+ * @max: Upper bound of TMDS clock frequency this entry applies to
+ * @mode: SoC specific register configuration
+ */
+struct pllmode {
+ u32 min;
+ u32 max;
+ u32 mode;
+};
+
+#define NB_PLL_MODE 7
+static struct pllmode pllmodes[NB_PLL_MODE] = {
+ {13500000, 13513500, HDMI_SRZ_PLL_CFG_MODE_13_5_MHZ},
+ {25174800, 25200000, HDMI_SRZ_PLL_CFG_MODE_25_2_MHZ},
+ {27000000, 27027000, HDMI_SRZ_PLL_CFG_MODE_27_MHZ},
+ {54000000, 54054000, HDMI_SRZ_PLL_CFG_MODE_54_MHZ},
+ {72000000, 74250000, HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ},
+ {108000000, 108108000, HDMI_SRZ_PLL_CFG_MODE_108_MHZ},
+ {148351648, 297000000, HDMI_SRZ_PLL_CFG_MODE_148_5_MHZ}
+};
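+/*
+ * e.g. a 74.25 MHz pixel clock falls inside the 72-74.25 MHz entry above,
+ * so the lookup loop in sti_hdmi_tx3g0c55phy_start selects
+ * HDMI_SRZ_PLL_CFG_MODE_74_25_MHZ.
+ */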
+
+#define NB_HDMI_PHY_CONFIG 5
+static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
+ {0, 40000000, {0x00101010, 0x00101010, 0x00101010, 0x02} },
+ {40000000, 140000000, {0x00111111, 0x00111111, 0x00111111, 0x02} },
+ {140000000, 160000000, {0x00131313, 0x00101010, 0x00101010, 0x02} },
+ {160000000, 250000000, {0x00131313, 0x00111111, 0x00111111, 0x03FE} },
+ {250000000, 300000000, {0x00151515, 0x00101010, 0x00101010, 0x03FE} },
+};
+
+#define PLL_CHANGE_DELAY 1 /* ms */
+
+/**
+ * Disable the rejection PLL
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return true if the PLL has been disabled
+ */
+static bool disable_pll_rejection(struct sti_hdmi *hdmi)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+ val &= ~REJECTION_PLL_HDMI_ENABLE_MASK;
+ writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ msleep(PLL_CHANGE_DELAY);
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
+
+ return !(val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
+}
+
+/**
+ * Enable the rejection PLL
+ *
+ * The old BCH/rejection PLL is reused to provide the CLKPXPLL clock input
+ * to the new PHY PLL that generates the serializer clock (TMDS*10) and the
+ * TMDS clock, which is now fed back into the HDMI formatter instead of the
+ * TMDS clock line from ClockGenB.
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return true if the PLL has been correctly set
+ */
+static bool enable_pll_rejection(struct sti_hdmi *hdmi)
+{
+ unsigned int inputclock;
+ u32 mdiv, ndiv, pdiv, val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!disable_pll_rejection(hdmi))
+ return false;
+
+ inputclock = hdmi->mode.clock * 1000;
+
+ DRM_DEBUG_DRIVER("hdmi rejection pll input clock = %dHz\n", inputclock);
+
+ /*
+ * Power up the HDMI rejection PLL.
+ * Note: on this SoC (stiH416) we are forced to have the input clock
+ * be equal to the HDMI pixel clock.
+ *
+ * The values here have been suggested by validation, however they are
+ * still provisional and subject to change.
+ *
+ * PLLout = (Fin * Mdiv) / ((2 * Ndiv) / 2^Pdiv)
+ */
+ if (inputclock < 50000000) {
+ /*
+ * For slower clocks we need to multiply more to keep the
+ * internal VCO frequency within the physical specification
+ * of the PLL.
+ */
+ pdiv = 4;
+ ndiv = 240;
+ mdiv = 30;
+ } else {
+ pdiv = 2;
+ ndiv = 60;
+ mdiv = 30;
+ }
+
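+ /*
+ * Quick check of the formula above: both divider sets give
+ * PLLout = Fin, as required on this SoC.
+ * Slow: (Fin * 30) / ((2 * 240) / 2^4) = (Fin * 30) / 30 = Fin
+ * Fast: (Fin * 30) / ((2 * 60) / 2^2) = (Fin * 30) / 30 = Fin
+ */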
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ val &= ~(REJECTION_PLL_HDMI_PDIV_MASK |
+ REJECTION_PLL_HDMI_NDIV_MASK |
+ REJECTION_PLL_HDMI_MDIV_MASK |
+ REJECTION_PLL_HDMI_ENABLE_MASK);
+
+ val |= (pdiv << REJECTION_PLL_HDMI_PDIV_SHIFT) |
+ (ndiv << REJECTION_PLL_HDMI_NDIV_SHIFT) |
+ (mdiv << REJECTION_PLL_HDMI_MDIV_SHIFT) |
+ (0x1 << REJECTION_PLL_HDMI_ENABLE_SHIFT);
+
+ writel(val, hdmi->syscfg + HDMI_REJECTION_PLL_CONFIGURATION);
+
+ msleep(PLL_CHANGE_DELAY);
+ val = readl(hdmi->syscfg + HDMI_REJECTION_PLL_STATUS);
+
+ return (val & REJECTION_PLL_HDMI_REJ_PLL_LOCK);
+}
+
+/**
+ * Start hdmi phy macro cell tx3g0c55
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return false if an error occurs
+ */
+static bool sti_hdmi_tx3g0c55phy_start(struct sti_hdmi *hdmi)
+{
+ u32 ckpxpll = hdmi->mode.clock * 1000;
+ u32 val, tmdsck, freqvco, pllctrl = 0;
+ unsigned int i;
+
+ if (!enable_pll_rejection(hdmi))
+ return false;
+
+ DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
+
+ /* Assuming no pixel repetition and 24bits color */
+ tmdsck = ckpxpll;
+ pllctrl = 2 << HDMI_SRZ_PLL_CFG_NDIV_SHIFT;
+
+ /*
+ * Setup the PLL mode parameter based on the ckpxpll. If we haven't got
+ * a clock frequency supported by one of the specific PLL modes then we
+ * will end up using the generic mode (0) which only supports a 10x
+ * multiplier, hence only 24bit color.
+ */
+ for (i = 0; i < NB_PLL_MODE; i++) {
+ if (ckpxpll >= pllmodes[i].min && ckpxpll <= pllmodes[i].max)
+ pllctrl |= HDMI_SRZ_PLL_CFG_MODE(pllmodes[i].mode);
+ }
+
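+ /*
+ * e.g. a 148.5 MHz TMDS clock gives a 1485 MHz serializer VCO and
+ * selects the 1700 MHz VCO range below.
+ */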
+ freqvco = tmdsck * 10;
+ if (freqvco <= 425000000UL) {
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_425MHZ);
+ } else if (freqvco <= 850000000UL) {
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_850MHZ);
+ } else if (freqvco <= 1700000000UL) {
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_1700MHZ);
+ } else if (freqvco <= 2970000000UL) {
+ pllctrl |= HDMI_SRZ_PLL_CFG_VCOR(HDMI_SRZ_PLL_CFG_VCOR_3000MHZ);
+ } else {
+ DRM_ERROR("PHY serializer clock out of range\n");
+ goto err;
+ }
+
+ /*
+ * Configure and power up the PHY PLL
+ */
+ hdmi->event_received = false;
+ DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
+ hdmi_write(hdmi, pllctrl, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received,
+ msecs_to_jiffies(HDMI_TIMEOUT_PLL_LOCK));
+
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
+ DRM_ERROR("hdmi phy pll not locked\n");
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
+
+ /*
+ * To configure the source termination and pre-emphasis appropriately
+ * for different high speed TMDS clock frequencies a phy configuration
+ * table must be provided, tailored to the SoC and board combination.
+ */
+ for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
+ if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
+ (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
+ val = hdmiphy_config[i].config[0];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_1);
+ val = hdmiphy_config[i].config[1];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_2);
+ val = hdmiphy_config[i].config[2];
+ hdmi_write(hdmi, val, HDMI_SRZ_TAP_3);
+ val = hdmiphy_config[i].config[3];
+ val |= HDMI_SRZ_CTRL_EXTERNAL_DATA_EN;
+ val &= ~HDMI_SRZ_CTRL_POWER_DOWN;
+ hdmi_write(hdmi, val, HDMI_SRZ_CTRL);
+
+ DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x 0x%x\n",
+ hdmiphy_config[i].config[0],
+ hdmiphy_config[i].config[1],
+ hdmiphy_config[i].config[2],
+ hdmiphy_config[i].config[3]);
+ return true;
+ }
+ }
+
+ /*
+ * Default, power up the serializer with no pre-emphasis or source
+ * termination.
+ */
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_1);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_2);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_TAP_3);
+ hdmi_write(hdmi, HDMI_SRZ_CTRL_EXTERNAL_DATA_EN, HDMI_SRZ_CTRL);
+
+ return true;
+
+err:
+ disable_pll_rejection(hdmi);
+
+ return false;
+}
+
+/**
+ * Stop hdmi phy macro cell tx3g0c55
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void sti_hdmi_tx3g0c55phy_stop(struct sti_hdmi *hdmi)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->event_received = false;
+
+ hdmi_write(hdmi, HDMI_SRZ_CTRL_POWER_DOWN, HDMI_SRZ_CTRL);
+ hdmi_write(hdmi, HDMI_SRZ_PLL_CFG_POWER_DOWN, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received,
+ msecs_to_jiffies(HDMI_TIMEOUT_PLL_LOCK));
+
+ if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
+ DRM_ERROR("hdmi phy pll not well disabled\n");
+
+ disable_pll_rejection(hdmi);
+}
+
+struct hdmi_phy_ops tx3g0c55phy_ops = {
+ .start = sti_hdmi_tx3g0c55phy_start,
+ .stop = sti_hdmi_tx3g0c55phy_stop,
+};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
new file mode 100644
index 000000000000..068237b3a303
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g0c55phy.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_TX3G0C55PHY_H_
+#define _STI_HDMI_TX3G0C55PHY_H_
+
+#include "sti_hdmi.h"
+
+extern struct hdmi_phy_ops tx3g0c55phy_ops;
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
new file mode 100644
index 000000000000..8e0ceb0ced33
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_hdmi_tx3g4c28phy.h"
+
+#define HDMI_SRZ_CFG 0x504
+#define HDMI_SRZ_PLL_CFG 0x510
+#define HDMI_SRZ_ICNTL 0x518
+#define HDMI_SRZ_CALCODE_EXT 0x520
+
+#define HDMI_SRZ_CFG_EN BIT(0)
+#define HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT BIT(1)
+#define HDMI_SRZ_CFG_EXTERNAL_DATA BIT(16)
+#define HDMI_SRZ_CFG_RBIAS_EXT BIT(17)
+#define HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION BIT(18)
+#define HDMI_SRZ_CFG_EN_BIASRES_DETECTION BIT(19)
+#define HDMI_SRZ_CFG_EN_SRC_TERMINATION BIT(24)
+
+#define HDMI_SRZ_CFG_INTERNAL_MASK (HDMI_SRZ_CFG_EN | \
+ HDMI_SRZ_CFG_DISABLE_BYPASS_SINK_CURRENT | \
+ HDMI_SRZ_CFG_EXTERNAL_DATA | \
+ HDMI_SRZ_CFG_RBIAS_EXT | \
+ HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION | \
+ HDMI_SRZ_CFG_EN_BIASRES_DETECTION | \
+ HDMI_SRZ_CFG_EN_SRC_TERMINATION)
+
+#define PLL_CFG_EN BIT(0)
+#define PLL_CFG_NDIV_SHIFT (8)
+#define PLL_CFG_IDF_SHIFT (16)
+#define PLL_CFG_ODF_SHIFT (24)
+
+#define ODF_DIV_1 (0)
+#define ODF_DIV_2 (1)
+#define ODF_DIV_4 (2)
+#define ODF_DIV_8 (3)
+
+#define HDMI_TIMEOUT_PLL_LOCK 50 /* milliseconds */
+
+struct plldividers_s {
+ u32 min;
+ u32 max;
+ u32 idf;
+ u32 odf;
+};
+
+/*
+ * Functional specification recommended values
+ */
+#define NB_PLL_MODE 5
+static struct plldividers_s plldividers[NB_PLL_MODE] = {
+ {0, 20000000, 1, ODF_DIV_8},
+ {20000000, 42500000, 2, ODF_DIV_8},
+ {42500000, 85000000, 4, ODF_DIV_4},
+ {85000000, 170000000, 8, ODF_DIV_2},
+ {170000000, 340000000, 16, ODF_DIV_1}
+};
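+/*
+ * Note: as the input clock range doubles, IDF doubles and ODF halves,
+ * which presumably keeps the PHY PLL VCO inside a fixed operating window
+ * across the whole 0-340 MHz span.
+ */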
+
+#define NB_HDMI_PHY_CONFIG 2
+static struct hdmi_phy_config hdmiphy_config[NB_HDMI_PHY_CONFIG] = {
+ {0, 250000000, {0x0, 0x0, 0x0, 0x0} },
+ {250000000, 300000000, {0x1110, 0x0, 0x0, 0x0} },
+};
+
+/**
+ * Start hdmi phy macro cell tx3g4c28
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ *
+ * Return false if an error occurs
+ */
+static bool sti_hdmi_tx3g4c28phy_start(struct sti_hdmi *hdmi)
+{
+ u32 ckpxpll = hdmi->mode.clock * 1000;
+ u32 val, tmdsck, idf, odf, pllctrl = 0;
+ bool foundplldivides = false;
+ int i;
+
+ DRM_DEBUG_DRIVER("ckpxpll = %dHz\n", ckpxpll);
+
+ for (i = 0; i < NB_PLL_MODE; i++) {
+ if (ckpxpll >= plldividers[i].min &&
+ ckpxpll < plldividers[i].max) {
+ idf = plldividers[i].idf;
+ odf = plldividers[i].odf;
+ foundplldivides = true;
+ break;
+ }
+ }
+
+ if (!foundplldivides) {
+ DRM_ERROR("input TMDS clock speed (%d) not supported\n",
+ ckpxpll);
+ goto err;
+ }
+
+ /* Assuming no pixel repetition and 24bits color */
+ tmdsck = ckpxpll;
+ pllctrl |= 40 << PLL_CFG_NDIV_SHIFT;
+
+ if (tmdsck > 340000000) {
+ DRM_ERROR("output TMDS clock (%d) out of range\n", tmdsck);
+ goto err;
+ }
+
+ pllctrl |= idf << PLL_CFG_IDF_SHIFT;
+ pllctrl |= odf << PLL_CFG_ODF_SHIFT;
+
+ /*
+ * Configure and power up the PHY PLL
+ */
+ hdmi->event_received = false;
+ DRM_DEBUG_DRIVER("pllctrl = 0x%x\n", pllctrl);
+ hdmi_write(hdmi, (pllctrl | PLL_CFG_EN), HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received,
+ msecs_to_jiffies(HDMI_TIMEOUT_PLL_LOCK));
+
+ if ((hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK) == 0) {
+ DRM_ERROR("hdmi phy pll not locked\n");
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("got PHY PLL Lock\n");
+
+ val = (HDMI_SRZ_CFG_EN |
+ HDMI_SRZ_CFG_EXTERNAL_DATA |
+ HDMI_SRZ_CFG_EN_BIASRES_DETECTION |
+ HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION);
+
+ if (tmdsck > 165000000)
+ val |= HDMI_SRZ_CFG_EN_SRC_TERMINATION;
+
+ /*
+ * To configure the source termination and pre-emphasis appropriately
+ * for different high speed TMDS clock frequencies a phy configuration
+ * table must be provided, tailored to the SoC and board combination.
+ */
+ for (i = 0; i < NB_HDMI_PHY_CONFIG; i++) {
+ if ((hdmiphy_config[i].min_tmds_freq <= tmdsck) &&
+ (hdmiphy_config[i].max_tmds_freq >= tmdsck)) {
+ val |= (hdmiphy_config[i].config[0]
+ & ~HDMI_SRZ_CFG_INTERNAL_MASK);
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+
+ val = hdmiphy_config[i].config[1];
+ hdmi_write(hdmi, val, HDMI_SRZ_ICNTL);
+
+ val = hdmiphy_config[i].config[2];
+ hdmi_write(hdmi, val, HDMI_SRZ_CALCODE_EXT);
+
+ DRM_DEBUG_DRIVER("serializer cfg 0x%x 0x%x 0x%x\n",
+ hdmiphy_config[i].config[0],
+ hdmiphy_config[i].config[1],
+ hdmiphy_config[i].config[2]);
+ return true;
+ }
+ }
+
+ /*
+ * Default, power up the serializer with no pre-emphasis or
+ * output swing correction
+ */
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_ICNTL);
+ hdmi_write(hdmi, 0x0, HDMI_SRZ_CALCODE_EXT);
+
+ return true;
+
+err:
+ return false;
+}
+
+/**
+ * Stop hdmi phy macro cell tx3g4c28
+ *
+ * @hdmi: pointer to the hdmi internal structure
+ */
+static void sti_hdmi_tx3g4c28phy_stop(struct sti_hdmi *hdmi)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hdmi->event_received = false;
+
+ val = HDMI_SRZ_CFG_EN_SINK_TERM_DETECTION;
+ val |= HDMI_SRZ_CFG_EN_BIASRES_DETECTION;
+
+ hdmi_write(hdmi, val, HDMI_SRZ_CFG);
+ hdmi_write(hdmi, 0, HDMI_SRZ_PLL_CFG);
+
+ /* wait PLL interrupt */
+ wait_event_interruptible_timeout(hdmi->wait_event,
+ hdmi->event_received,
+ msecs_to_jiffies(HDMI_TIMEOUT_PLL_LOCK));
+
+ if (hdmi_read(hdmi, HDMI_STA) & HDMI_STA_DLL_LCK)
+ DRM_ERROR("hdmi phy pll not well disabled\n");
+}
+
+struct hdmi_phy_ops tx3g4c28phy_ops = {
+ .start = sti_hdmi_tx3g4c28phy_start,
+ .stop = sti_hdmi_tx3g4c28phy_stop,
+};
diff --git a/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
new file mode 100644
index 000000000000..f99a7ff281ef
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hdmi_tx3g4c28phy.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HDMI_TX3G4C28PHY_H_
+#define _STI_HDMI_TX3G4C28PHY_H_
+
+#include "sti_hdmi.h"
+
+extern struct hdmi_phy_ops tx3g4c28phy_ops;
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
new file mode 100644
index 000000000000..06a587c4f1bb
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "sti_compositor.h"
+#include "sti_gdp.h"
+#include "sti_layer.h"
+#include "sti_vid.h"
+
+const char *sti_layer_to_str(struct sti_layer *layer)
+{
+ switch (layer->desc) {
+ case STI_GDP_0:
+ return "GDP0";
+ case STI_GDP_1:
+ return "GDP1";
+ case STI_GDP_2:
+ return "GDP2";
+ case STI_GDP_3:
+ return "GDP3";
+ case STI_VID_0:
+ return "VID0";
+ case STI_VID_1:
+ return "VID1";
+ case STI_CURSOR:
+ return "CURSOR";
+ default:
+ return "<UNKNOWN LAYER>";
+ }
+}
+
+struct sti_layer *sti_layer_create(struct device *dev, int desc,
+ void __iomem *baseaddr)
+{
+ struct sti_layer *layer = NULL;
+
+ switch (desc & STI_LAYER_TYPE_MASK) {
+ case STI_GDP:
+ layer = sti_gdp_create(dev, desc);
+ break;
+ case STI_VID:
+ layer = sti_vid_create(dev);
+ break;
+ }
+
+ if (!layer) {
+ DRM_ERROR("Failed to create layer\n");
+ return NULL;
+ }
+
+ layer->desc = desc;
+ layer->dev = dev;
+ layer->regs = baseaddr;
+
+ layer->ops->init(layer);
+
+ DRM_DEBUG_DRIVER("%s created\n", sti_layer_to_str(layer));
+
+ return layer;
+}
+
+int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+ struct drm_display_mode *mode, int mixer_id,
+ int dest_x, int dest_y, int dest_w, int dest_h,
+ int src_x, int src_y, int src_w, int src_h)
+{
+ int ret;
+ unsigned int i;
+ struct drm_gem_cma_object *cma_obj;
+
+ if (!layer || !fb || !mode) {
+ DRM_ERROR("Null fb, layer or mode\n");
+ return 1;
+ }
+
+ cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!cma_obj) {
+ DRM_ERROR("Can't get CMA GEM object for fb\n");
+ return 1;
+ }
+
+ layer->fb = fb;
+ layer->mode = mode;
+ layer->mixer_id = mixer_id;
+ layer->dst_x = dest_x;
+ layer->dst_y = dest_y;
+ layer->dst_w = clamp_val(dest_w, 0, mode->crtc_hdisplay - dest_x);
+ layer->dst_h = clamp_val(dest_h, 0, mode->crtc_vdisplay - dest_y);
+ layer->src_x = src_x;
+ layer->src_y = src_y;
+ layer->src_w = src_w;
+ layer->src_h = src_h;
+ layer->format = fb->pixel_format;
+ layer->paddr = cma_obj->paddr;
+ for (i = 0; i < 4; i++) {
+ layer->pitches[i] = fb->pitches[i];
+ layer->offsets[i] = fb->offsets[i];
+ }
+
+ DRM_DEBUG_DRIVER("%s is associated with mixer_id %d\n",
+ sti_layer_to_str(layer),
+ layer->mixer_id);
+ DRM_DEBUG_DRIVER("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+ sti_layer_to_str(layer),
+ layer->dst_w, layer->dst_h, layer->dst_x, layer->dst_y,
+ layer->src_w, layer->src_h, layer->src_x,
+ layer->src_y);
+
+ DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
+ (char *)&layer->format, (unsigned long)layer->paddr);
+
+ if (!layer->ops->prepare)
+ goto err_no_prepare;
+
+ ret = layer->ops->prepare(layer, !layer->enabled);
+ if (!ret)
+ layer->enabled = true;
+
+ return ret;
+
+err_no_prepare:
+ DRM_ERROR("Cannot prepare\n");
+ return 1;
+}
+
+int sti_layer_commit(struct sti_layer *layer)
+{
+ if (!layer)
+ return 1;
+
+ if (!layer->ops->commit)
+ goto err_no_commit;
+
+ return layer->ops->commit(layer);
+
+err_no_commit:
+ DRM_ERROR("Cannot commit\n");
+ return 1;
+}
+
+int sti_layer_disable(struct sti_layer *layer)
+{
+ int ret;
+
+ if (!layer)
+ return 1;
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+
+ if (!layer->enabled)
+ return 0;
+
+ if (!layer->ops->disable)
+ goto err_no_disable;
+
+ ret = layer->ops->disable(layer);
+ if (!ret)
+ layer->enabled = false;
+ else
+ DRM_ERROR("Disable failed\n");
+
+ return ret;
+
+err_no_disable:
+ DRM_ERROR("Cannot disable\n");
+ return 1;
+}
+
+const uint32_t *sti_layer_get_formats(struct sti_layer *layer)
+{
+ if (!layer)
+ return NULL;
+
+ if (!layer->ops->get_formats)
+ return NULL;
+
+ return layer->ops->get_formats(layer);
+}
+
+unsigned int sti_layer_get_nb_formats(struct sti_layer *layer)
+{
+ if (!layer)
+ return 0;
+
+ if (!layer->ops->get_nb_formats)
+ return 0;
+
+ return layer->ops->get_nb_formats(layer);
+}
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
new file mode 100644
index 000000000000..198c3774cc12
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_LAYER_H_
+#define _STI_LAYER_H_
+
+#include <drm/drmP.h>
+
+#define to_sti_layer(x) container_of(x, struct sti_layer, plane)
+
+#define STI_LAYER_TYPE_SHIFT 8
+#define STI_LAYER_TYPE_MASK (~((1<<STI_LAYER_TYPE_SHIFT)-1))
+
+struct sti_layer;
+
+enum sti_layer_type {
+ STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
+ STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
+ STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
+ STI_BCK = 4 << STI_LAYER_TYPE_SHIFT
+};
+
+enum sti_layer_id_of_type {
+ STI_ID_0 = 0,
+ STI_ID_1 = 1,
+ STI_ID_2 = 2,
+ STI_ID_3 = 3
+};
+
+enum sti_layer_desc {
+ STI_GDP_0 = STI_GDP | STI_ID_0,
+ STI_GDP_1 = STI_GDP | STI_ID_1,
+ STI_GDP_2 = STI_GDP | STI_ID_2,
+ STI_GDP_3 = STI_GDP | STI_ID_3,
+ STI_VID_0 = STI_VID | STI_ID_0,
+ STI_VID_1 = STI_VID | STI_ID_1,
+ STI_CURSOR = STI_CUR,
+ STI_BACK = STI_BCK
+};
+
+/**
+ * STI layer functions structure
+ *
+ * @get_formats: get layer supported formats
+ * @get_nb_formats: get the number of supported formats
+ * @init: initialize the layer
+ * @prepare: prepare layer before rendering
+ * @commit: set layer for rendering
+ * @disable: disable layer
+ */
+struct sti_layer_funcs {
+ const uint32_t* (*get_formats)(struct sti_layer *layer);
+ unsigned int (*get_nb_formats)(struct sti_layer *layer);
+ void (*init)(struct sti_layer *layer);
+ int (*prepare)(struct sti_layer *layer, bool first_prepare);
+ int (*commit)(struct sti_layer *layer);
+ int (*disable)(struct sti_layer *layer);
+};
+
+/**
+ * STI layer structure
+ *
+ * @plane: drm plane it is bound to (if any)
+ * @fb: drm fb it is bound to
+ * @mode: display mode
+ * @desc: layer type & id
+ * @dev: driver device
+ * @regs: layer registers
+ * @ops: layer functions
+ * @zorder: layer z-order
+ * @mixer_id: id of the mixer used to display the layer
+ * @enabled: true if the layer is active
+ * @src_x src_y: coordinates of the input (fb) area
+ * @src_w src_h: size of the input (fb) area
+ * @dst_x dst_y: coordinates of the output (crtc) area
+ * @dst_w dst_h: size of the output (crtc) area
+ * @format: format
+ * @pitches: pitch of 'planes' (eg: Y, U, V)
+ * @offsets: offset of 'planes'
+ * @paddr: physical address of the input buffer
+ */
+struct sti_layer {
+ struct drm_plane plane;
+ struct drm_framebuffer *fb;
+ struct drm_display_mode *mode;
+ enum sti_layer_desc desc;
+ struct device *dev;
+ void __iomem *regs;
+ const struct sti_layer_funcs *ops;
+ int zorder;
+ int mixer_id;
+ bool enabled;
+ int src_x, src_y;
+ int src_w, src_h;
+ int dst_x, dst_y;
+ int dst_w, dst_h;
+ uint32_t format;
+ unsigned int pitches[4];
+ unsigned int offsets[4];
+ dma_addr_t paddr;
+};
+
+struct sti_layer *sti_layer_create(struct device *dev, int desc,
+ void __iomem *baseaddr);
+int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+ struct drm_display_mode *mode,
+ int mixer_id,
+ int dest_x, int dest_y,
+ int dest_w, int dest_h,
+ int src_x, int src_y,
+ int src_w, int src_h);
+int sti_layer_commit(struct sti_layer *layer);
+int sti_layer_disable(struct sti_layer *layer);
+const uint32_t *sti_layer_get_formats(struct sti_layer *layer);
+unsigned int sti_layer_get_nb_formats(struct sti_layer *layer);
+const char *sti_layer_to_str(struct sti_layer *layer);
+
+#endif
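As a worked illustration of the descriptor encoding above (layer type in the bits at and above STI_LAYER_TYPE_SHIFT, per-type instance id in the low bits), a standalone sketch; the decode is derived from the defines in this header:

#include <stdio.h>

#define STI_LAYER_TYPE_SHIFT 8
#define STI_LAYER_TYPE_MASK (~((1 << STI_LAYER_TYPE_SHIFT) - 1))

int main(void)
{
	int desc = (1 << STI_LAYER_TYPE_SHIFT) | 2;	/* STI_GDP_2 */

	/* high bits select the block type, low bits the instance */
	printf("type=0x%x id=%d\n",
	       desc & STI_LAYER_TYPE_MASK, desc & ~STI_LAYER_TYPE_MASK);
	return 0;
}

This prints type=0x100 id=2, i.e. the GDP type with instance 2, matching the switch on desc & STI_LAYER_TYPE_MASK in sti_layer_create().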
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
new file mode 100644
index 000000000000..79f369db9fb6
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include "sti_compositor.h"
+#include "sti_mixer.h"
+#include "sti_vtg.h"
+
+/* Identity: G=Y , B=Cb , R=Cr */
+static const u32 mixerColorSpaceMatIdentity[] = {
+ 0x10000000, 0x00000000, 0x10000000, 0x00001000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+/* regs offset */
+#define GAM_MIXER_CTL 0x00
+#define GAM_MIXER_BKC 0x04
+#define GAM_MIXER_BCO 0x0C
+#define GAM_MIXER_BCS 0x10
+#define GAM_MIXER_AVO 0x28
+#define GAM_MIXER_AVS 0x2C
+#define GAM_MIXER_CRB 0x34
+#define GAM_MIXER_ACT 0x38
+#define GAM_MIXER_MBP 0x3C
+#define GAM_MIXER_MX0 0x80
+
+/* id for depth of CRB reg */
+#define GAM_DEPTH_VID0_ID 1
+#define GAM_DEPTH_VID1_ID 2
+#define GAM_DEPTH_GDP0_ID 3
+#define GAM_DEPTH_GDP1_ID 4
+#define GAM_DEPTH_GDP2_ID 5
+#define GAM_DEPTH_GDP3_ID 6
+#define GAM_DEPTH_MASK_ID 7
+
+/* mask in CTL reg */
+#define GAM_CTL_BACK_MASK BIT(0)
+#define GAM_CTL_VID0_MASK BIT(1)
+#define GAM_CTL_VID1_MASK BIT(2)
+#define GAM_CTL_GDP0_MASK BIT(3)
+#define GAM_CTL_GDP1_MASK BIT(4)
+#define GAM_CTL_GDP2_MASK BIT(5)
+#define GAM_CTL_GDP3_MASK BIT(6)
+
+const char *sti_mixer_to_str(struct sti_mixer *mixer)
+{
+ switch (mixer->id) {
+ case STI_MIXER_MAIN:
+ return "MAIN_MIXER";
+ case STI_MIXER_AUX:
+ return "AUX_MIXER";
+ default:
+ return "<UNKNOWN MIXER>";
+ }
+}
+
+static inline u32 sti_mixer_reg_read(struct sti_mixer *mixer, u32 reg_id)
+{
+ return readl(mixer->regs + reg_id);
+}
+
+static inline void sti_mixer_reg_write(struct sti_mixer *mixer,
+ u32 reg_id, u32 val)
+{
+ writel(val, mixer->regs + reg_id);
+}
+
+void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
+{
+ u32 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
+
+ val &= ~GAM_CTL_BACK_MASK;
+ val |= enable;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+}
+
+static void sti_mixer_set_background_color(struct sti_mixer *mixer,
+ u8 red, u8 green, u8 blue)
+{
+ u32 val = (red << 16) | (green << 8) | blue;
+
+ sti_mixer_reg_write(mixer, GAM_MIXER_BKC, val);
+}
+
+static void sti_mixer_set_background_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode)
+{
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, 0);
+ yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, 0);
+ xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+
+ sti_mixer_reg_write(mixer, GAM_MIXER_BCO, ydo << 16 | xdo);
+ sti_mixer_reg_write(mixer, GAM_MIXER_BCS, yds << 16 | xds);
+}
+
+int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
+{
+ int layer_id = 0, depth = layer->zorder;
+ u32 mask, val;
+
+ if (depth >= GAM_MIXER_NB_DEPTH_LEVEL)
+ return 1;
+
+ switch (layer->desc) {
+ case STI_GDP_0:
+ layer_id = GAM_DEPTH_GDP0_ID;
+ break;
+ case STI_GDP_1:
+ layer_id = GAM_DEPTH_GDP1_ID;
+ break;
+ case STI_GDP_2:
+ layer_id = GAM_DEPTH_GDP2_ID;
+ break;
+ case STI_GDP_3:
+ layer_id = GAM_DEPTH_GDP3_ID;
+ break;
+ case STI_VID_0:
+ layer_id = GAM_DEPTH_VID0_ID;
+ break;
+ case STI_VID_1:
+ layer_id = GAM_DEPTH_VID1_ID;
+ break;
+ default:
+ DRM_ERROR("Unknown layer %d\n", layer->desc);
+ return 1;
+ }
+ mask = GAM_DEPTH_MASK_ID << (3 * depth);
+ layer_id = layer_id << (3 * depth);
+
+ DRM_DEBUG_DRIVER("%s %s depth=%d\n", sti_mixer_to_str(mixer),
+ sti_layer_to_str(layer), depth);
+ dev_dbg(mixer->dev, "GAM_MIXER_CRB val 0x%x mask 0x%x\n",
+ layer_id, mask);
+
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CRB);
+ val &= ~mask;
+ val |= layer_id;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CRB, val);
+
+ dev_dbg(mixer->dev, "Read GAM_MIXER_CRB 0x%x\n",
+ sti_mixer_reg_read(mixer, GAM_MIXER_CRB));
+ return 0;
+}
+
+int sti_mixer_active_video_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode)
+{
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, 0);
+ yds = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, 0);
+ xds = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+
+ DRM_DEBUG_DRIVER("%s active video area xdo:%d ydo:%d xds:%d yds:%d\n",
+ sti_mixer_to_str(mixer), xdo, ydo, xds, yds);
+ sti_mixer_reg_write(mixer, GAM_MIXER_AVO, ydo << 16 | xdo);
+ sti_mixer_reg_write(mixer, GAM_MIXER_AVS, yds << 16 | xds);
+
+ sti_mixer_set_background_color(mixer, 0xFF, 0, 0);
+
+ sti_mixer_set_background_area(mixer, mode);
+ sti_mixer_set_background_status(mixer, true);
+ return 0;
+}
+
+static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
+{
+ switch (layer->desc) {
+ case STI_BACK:
+ return GAM_CTL_BACK_MASK;
+ case STI_GDP_0:
+ return GAM_CTL_GDP0_MASK;
+ case STI_GDP_1:
+ return GAM_CTL_GDP1_MASK;
+ case STI_GDP_2:
+ return GAM_CTL_GDP2_MASK;
+ case STI_GDP_3:
+ return GAM_CTL_GDP3_MASK;
+ case STI_VID_0:
+ return GAM_CTL_VID0_MASK;
+ case STI_VID_1:
+ return GAM_CTL_VID1_MASK;
+ default:
+ return 0;
+ }
+}
+
+int sti_mixer_set_layer_status(struct sti_mixer *mixer,
+ struct sti_layer *layer, bool status)
+{
+ u32 mask, val;
+
+ DRM_DEBUG_DRIVER("%s %s %s\n", status ? "enable" : "disable",
+ sti_mixer_to_str(mixer), sti_layer_to_str(layer));
+
+ mask = sti_mixer_get_layer_mask(layer);
+ if (!mask) {
+ DRM_ERROR("Can not find layer mask\n");
+ return -EINVAL;
+ }
+
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL);
+ val &= ~mask;
+ val |= status ? mask : 0;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+
+ return 0;
+}
+
+void sti_mixer_set_matrix(struct sti_mixer *mixer)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mixerColorSpaceMatIdentity); i++)
+ sti_mixer_reg_write(mixer, GAM_MIXER_MX0 + (i * 4),
+ mixerColorSpaceMatIdentity[i]);
+}
+
+struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+ void __iomem *baseaddr)
+{
+ struct sti_mixer *mixer = devm_kzalloc(dev, sizeof(*mixer), GFP_KERNEL);
+ struct device_node *np = dev->of_node;
+
+ dev_dbg(dev, "%s\n", __func__);
+ if (!mixer) {
+ DRM_ERROR("Failed to allocated memory for mixer\n");
+ return NULL;
+ }
+ mixer->regs = baseaddr;
+ mixer->dev = dev;
+ mixer->id = id;
+
+ if (of_device_is_compatible(np, "st,stih416-compositor"))
+ sti_mixer_set_matrix(mixer);
+
+ DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
+ sti_mixer_to_str(mixer), mixer->regs);
+
+ return mixer;
+}
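The cross-bar (CRB) register programmed by sti_mixer_set_layer_depth() packs one 3-bit layer id per depth level. A standalone sketch of the field arithmetic, reusing the GAM_DEPTH_* values from this file (the depth chosen is only an example):

#include <stdio.h>
#include <stdint.h>

#define GAM_DEPTH_GDP1_ID 4
#define GAM_DEPTH_MASK_ID 7

int main(void)
{
	uint32_t crb = 0;
	int depth = 2;	/* z-order level assigned to this layer */

	/* clear the 3-bit slot for this depth, then place the layer id */
	crb &= ~((uint32_t)GAM_DEPTH_MASK_ID << (3 * depth));
	crb |= (uint32_t)GAM_DEPTH_GDP1_ID << (3 * depth);
	printf("GAM_MIXER_CRB = 0x%08x\n", crb);	/* 0x00000100 */
	return 0;
}

GDP1 (id 4) at depth 2 lands in bits 6..8, giving 4 << 6 = 0x100, which is exactly the read-modify-write performed on GAM_MIXER_CRB.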
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
new file mode 100644
index 000000000000..874372102e52
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_MIXER_H_
+#define _STI_MIXER_H_
+
+#include <drm/drmP.h>
+
+#include "sti_layer.h"
+
+#define to_sti_mixer(x) container_of(x, struct sti_mixer, drm_crtc)
+
+/**
+ * STI Mixer subdevice structure
+ *
+ * @dev: driver device
+ * @regs: mixer registers
+ * @id: id of the mixer
+ * @drm_crtc: crtc object link to the mixer
+ * @pending_event: set if a flip event is pending on crtc
+ */
+struct sti_mixer {
+ struct device *dev;
+ void __iomem *regs;
+ int id;
+ struct drm_crtc drm_crtc;
+ struct drm_pending_vblank_event *pending_event;
+};
+
+const char *sti_mixer_to_str(struct sti_mixer *mixer);
+
+struct sti_mixer *sti_mixer_create(struct device *dev, int id,
+ void __iomem *baseaddr);
+
+int sti_mixer_set_layer_status(struct sti_mixer *mixer,
+ struct sti_layer *layer, bool status);
+int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
+int sti_mixer_active_video_area(struct sti_mixer *mixer,
+ struct drm_display_mode *mode);
+
+void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
+
+/* depth in Cross-bar control = z order */
+#define GAM_MIXER_NB_DEPTH_LEVEL 7
+
+#define STI_MIXER_MAIN 0
+#define STI_MIXER_AUX 1
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
new file mode 100644
index 000000000000..b69e26fee76e
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -0,0 +1,648 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Vincent Abriou <vincent.abriou@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
+/* glue registers */
+#define TVO_CSC_MAIN_M0 0x000
+#define TVO_CSC_MAIN_M1 0x004
+#define TVO_CSC_MAIN_M2 0x008
+#define TVO_CSC_MAIN_M3 0x00c
+#define TVO_CSC_MAIN_M4 0x010
+#define TVO_CSC_MAIN_M5 0x014
+#define TVO_CSC_MAIN_M6 0x018
+#define TVO_CSC_MAIN_M7 0x01c
+#define TVO_MAIN_IN_VID_FORMAT 0x030
+#define TVO_CSC_AUX_M0 0x100
+#define TVO_CSC_AUX_M1 0x104
+#define TVO_CSC_AUX_M2 0x108
+#define TVO_CSC_AUX_M3 0x10c
+#define TVO_CSC_AUX_M4 0x110
+#define TVO_CSC_AUX_M5 0x114
+#define TVO_CSC_AUX_M6 0x118
+#define TVO_CSC_AUX_M7 0x11c
+#define TVO_AUX_IN_VID_FORMAT 0x130
+#define TVO_VIP_HDF 0x400
+#define TVO_HD_SYNC_SEL 0x418
+#define TVO_HD_DAC_CFG_OFF 0x420
+#define TVO_VIP_HDMI 0x500
+#define TVO_HDMI_FORCE_COLOR_0 0x504
+#define TVO_HDMI_FORCE_COLOR_1 0x508
+#define TVO_HDMI_CLIP_VALUE_B_CB 0x50c
+#define TVO_HDMI_CLIP_VALUE_Y_G 0x510
+#define TVO_HDMI_CLIP_VALUE_R_CR 0x514
+#define TVO_HDMI_SYNC_SEL 0x518
+#define TVO_HDMI_DFV_OBS 0x540
+
+#define TVO_IN_FMT_SIGNED BIT(0)
+#define TVO_SYNC_EXT BIT(4)
+
+#define TVO_VIP_REORDER_R_SHIFT 24
+#define TVO_VIP_REORDER_G_SHIFT 20
+#define TVO_VIP_REORDER_B_SHIFT 16
+#define TVO_VIP_REORDER_MASK 0x3
+#define TVO_VIP_REORDER_Y_G_SEL 0
+#define TVO_VIP_REORDER_CB_B_SEL 1
+#define TVO_VIP_REORDER_CR_R_SEL 2
+
+#define TVO_VIP_CLIP_SHIFT 8
+#define TVO_VIP_CLIP_MASK 0x7
+#define TVO_VIP_CLIP_DISABLED 0
+#define TVO_VIP_CLIP_EAV_SAV 1
+#define TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y 2
+#define TVO_VIP_CLIP_LIMITED_RANGE_CB_CR 3
+#define TVO_VIP_CLIP_PROG_RANGE 4
+
+#define TVO_VIP_RND_SHIFT 4
+#define TVO_VIP_RND_MASK 0x3
+#define TVO_VIP_RND_8BIT_ROUNDED 0
+#define TVO_VIP_RND_10BIT_ROUNDED 1
+#define TVO_VIP_RND_12BIT_ROUNDED 2
+
+#define TVO_VIP_SEL_INPUT_MASK 0xf
+#define TVO_VIP_SEL_INPUT_MAIN 0x0
+#define TVO_VIP_SEL_INPUT_AUX 0x8
+#define TVO_VIP_SEL_INPUT_FORCE_COLOR 0xf
+#define TVO_VIP_SEL_INPUT_BYPASS_MASK 0x1
+#define TVO_VIP_SEL_INPUT_BYPASSED 1
+
+#define TVO_SYNC_MAIN_VTG_SET_REF 0x00
+#define TVO_SYNC_MAIN_VTG_SET_1 0x01
+#define TVO_SYNC_MAIN_VTG_SET_2 0x02
+#define TVO_SYNC_MAIN_VTG_SET_3 0x03
+#define TVO_SYNC_MAIN_VTG_SET_4 0x04
+#define TVO_SYNC_MAIN_VTG_SET_5 0x05
+#define TVO_SYNC_MAIN_VTG_SET_6 0x06
+#define TVO_SYNC_AUX_VTG_SET_REF 0x10
+#define TVO_SYNC_AUX_VTG_SET_1 0x11
+#define TVO_SYNC_AUX_VTG_SET_2 0x12
+#define TVO_SYNC_AUX_VTG_SET_3 0x13
+#define TVO_SYNC_AUX_VTG_SET_4 0x14
+#define TVO_SYNC_AUX_VTG_SET_5 0x15
+#define TVO_SYNC_AUX_VTG_SET_6 0x16
+
+#define TVO_SYNC_HD_DCS_SHIFT 8
+
+#define ENCODER_MAIN_CRTC_MASK BIT(0)
+
+/* enum listing the supported output data format */
+enum sti_tvout_video_out_type {
+ STI_TVOUT_VIDEO_OUT_RGB,
+ STI_TVOUT_VIDEO_OUT_YUV,
+};
+
+struct sti_tvout {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ void __iomem *regs;
+ struct reset_control *reset;
+ struct drm_encoder *hdmi;
+ struct drm_encoder *hda;
+};
+
+struct sti_tvout_encoder {
+ struct drm_encoder encoder;
+ struct sti_tvout *tvout;
+};
+
+#define to_sti_tvout_encoder(x) \
+ container_of(x, struct sti_tvout_encoder, encoder)
+
+#define to_sti_tvout(x) to_sti_tvout_encoder(x)->tvout
+
+/* preformatter conversion matrix */
+static const u32 rgb_to_ycbcr_601[8] = {
+ 0xF927082E, 0x04C9FEAB, 0x01D30964, 0xFA95FD3D,
+ 0x0000082E, 0x00002000, 0x00002000, 0x00000000
+};
+
+/* 709 RGB to YCbCr */
+static const u32 rgb_to_ycbcr_709[8] = {
+ 0xF891082F, 0x0367FF40, 0x01280B71, 0xF9B1FE20,
+ 0x0000082F, 0x00002000, 0x00002000, 0x00000000
+};
+
+static u32 tvout_read(struct sti_tvout *tvout, int offset)
+{
+ return readl(tvout->regs + offset);
+}
+
+static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
+{
+ writel(val, tvout->regs + offset);
+}
+
+/**
+ * Set the color channel order of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to update
+ * @cr_r: reorder selector for the red/Cr channel
+ * @y_g: reorder selector for the green/Y channel
+ * @cb_b: reorder selector for the blue/Cb channel
+ */
+static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
+ u32 cr_r, u32 y_g, u32 cb_b)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
+ val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_B_SHIFT);
+ val |= cr_r << TVO_VIP_REORDER_R_SHIFT;
+ val |= y_g << TVO_VIP_REORDER_G_SHIFT;
+ val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
+
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Set the clipping mode of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to update
+ * @range: clipping range
+ */
+static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg,
+ u32 range)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
+ val |= range << TVO_VIP_CLIP_SHIFT;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Set the rounding mode of a VIP
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to update
+ * @rnd: rounding value per component
+ */
+static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
+ val |= rnd << TVO_VIP_RND_SHIFT;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Select the VIP input
+ *
+ * @tvout: tvout structure
+ * @reg: VIP register to update
+ * @main_path: true if the main path is used, false for the aux path
+ * @sel_input_logic_inverted: true on chips where the bypass logic is inverted
+ * @video_out: selected output data format (RGB or YUV)
+ */
+static void tvout_vip_set_sel_input(struct sti_tvout *tvout, int reg,
+ bool main_path,
+ bool sel_input_logic_inverted,
+ enum sti_tvout_video_out_type video_out)
+{
+ u32 sel_input;
+ u32 val = tvout_read(tvout, reg);
+
+ if (main_path)
+ sel_input = TVO_VIP_SEL_INPUT_MAIN;
+ else
+ sel_input = TVO_VIP_SEL_INPUT_AUX;
+
+ switch (video_out) {
+ case STI_TVOUT_VIDEO_OUT_RGB:
+ sel_input |= TVO_VIP_SEL_INPUT_BYPASSED;
+ break;
+ case STI_TVOUT_VIDEO_OUT_YUV:
+ sel_input &= ~TVO_VIP_SEL_INPUT_BYPASSED;
+ break;
+ }
+
+ /* on stih407 chips the sel_input bypass mode logic is inverted */
+ if (sel_input_logic_inverted)
+ sel_input = sel_input ^ TVO_VIP_SEL_INPUT_BYPASS_MASK;
+
+ val &= ~TVO_VIP_SEL_INPUT_MASK;
+ val |= sel_input;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Select signed or unsigned input video
+ *
+ * @tvout: tvout structure
+ * @reg: input video format register to update
+ * @in_vid_fmt: input video format to use
+ */
+static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, int reg,
+ u32 in_vid_fmt)
+{
+ u32 val = tvout_read(tvout, reg);
+
+ val &= ~TVO_IN_FMT_SIGNED;
+ val |= in_vid_fmt;
+ tvout_write(tvout, val, reg);
+}
+
+/**
+ * Start the VIP block for the HDMI output
+ *
+ * @tvout: pointer to the tvout structure
+ * @main_path: true if the main path has to be used in the VIP
+ * configuration, else the aux path is used
+ */
+static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
+{
+ struct device_node *node = tvout->dev->of_node;
+ bool sel_input_logic_inverted = false;
+
+ dev_dbg(tvout->dev, "%s\n", __func__);
+
+ if (main_path) {
+ DRM_DEBUG_DRIVER("main vip for hdmi\n");
+ /* select the input sync for hdmi = VTG set 1 */
+ tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ } else {
+ DRM_DEBUG_DRIVER("aux vip for hdmi\n");
+ /* select the input sync for hdmi = VTG set 1 */
+ tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ }
+
+ /* set color channel order */
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
+ TVO_VIP_REORDER_CR_R_SEL,
+ TVO_VIP_REORDER_Y_G_SEL,
+ TVO_VIP_REORDER_CB_B_SEL);
+
+ /* set clipping mode (Limited range RGB/Y) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
+ TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+
+ /* set round mode (rounded to 8-bit per component) */
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
+
+ if (of_device_is_compatible(node, "st,stih407-tvout")) {
+ /* set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, TVO_MAIN_IN_VID_FORMAT,
+ TVO_IN_FMT_SIGNED);
+ sel_input_logic_inverted = true;
+ }
+
+ /* input selection */
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
+ sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
+}
+
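Taken together, the VIP helpers above read-modify-write a single 32-bit register. A standalone sketch of the value tvout_hdmi_start() composes for RGB output on a non-stih407 chip, using the shift defines from this file (the field values mirror TVO_VIP_REORDER_*_SEL, TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y, TVO_VIP_RND_8BIT_ROUNDED and TVO_VIP_SEL_INPUT_MAIN | BYPASSED):

#include <stdio.h>
#include <stdint.h>

#define TVO_VIP_REORDER_R_SHIFT 24
#define TVO_VIP_REORDER_G_SHIFT 20
#define TVO_VIP_REORDER_B_SHIFT 16
#define TVO_VIP_CLIP_SHIFT 8
#define TVO_VIP_RND_SHIFT 4

int main(void)
{
	uint32_t val = 0;

	val |= 2u << TVO_VIP_REORDER_R_SHIFT;	/* Cr on the R channel */
	val |= 0u << TVO_VIP_REORDER_G_SHIFT;	/* Y on the G channel */
	val |= 1u << TVO_VIP_REORDER_B_SHIFT;	/* Cb on the B channel */
	val |= 2u << TVO_VIP_CLIP_SHIFT;	/* limited range RGB/Y */
	val |= 0u << TVO_VIP_RND_SHIFT;		/* 8-bit rounding */
	val |= 1u;				/* main input, conversion bypassed */

	printf("TVO_VIP_HDMI = 0x%08x\n", val);	/* 0x02010201 */
	return 0;
}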
+/**
+ * Start the HDF VIP and the HD DAC
+ *
+ * @tvout: pointer to the tvout structure
+ * @main_path: true if the main path has to be used in the VIP
+ * configuration, else the aux path is used
+ */
+static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
+{
+ struct device_node *node = tvout->dev->of_node;
+ bool sel_input_logic_inverted = false;
+
+ dev_dbg(tvout->dev, "%s\n", __func__);
+
+ if (!main_path) {
+ DRM_ERROR("HD Analog on aux not implemented\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("main vip for HDF\n");
+
+ /* set color channel order */
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
+ TVO_VIP_REORDER_CR_R_SEL,
+ TVO_VIP_REORDER_Y_G_SEL,
+ TVO_VIP_REORDER_CB_B_SEL);
+
+ /* set clipping mode (Limited range Cb/Cr) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF,
+ TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
+
+ /* set round mode (rounded to 10-bit per component) */
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
+
+ if (of_device_is_compatible(node, "st,stih407-tvout")) {
+ /* set input video format */
+ tvout_vip_set_in_vid_fmt(tvout, TVO_MAIN_IN_VID_FORMAT,
+ TVO_IN_FMT_SIGNED);
+ sel_input_logic_inverted = true;
+ }
+
+ /* input selection */
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
+ sel_input_logic_inverted,
+ STI_TVOUT_VIDEO_OUT_YUV);
+
+ /*
+ * select the input sync for HD analog (VTG set 3)
+ * and for HD DCS (VTG set 2)
+ */
+ tvout_write(tvout,
+ (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
+ | TVO_SYNC_MAIN_VTG_SET_3,
+ TVO_HD_SYNC_SEL);
+
+ /* power up HD DAC */
+ tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
+}
+
+static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+}
+
+static bool sti_tvout_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void sti_tvout_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+}
+
+static void sti_tvout_encoder_prepare(struct drm_encoder *encoder)
+{
+}
+
+static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct sti_tvout_encoder *sti_encoder = to_sti_tvout_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(sti_encoder);
+}
+
+static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
+ .destroy = sti_tvout_encoder_destroy,
+};
+
+static void sti_hda_encoder_commit(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ tvout_hda_start(tvout, true);
+}
+
+static void sti_hda_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ /* reset VIP register */
+ tvout_write(tvout, 0x0, TVO_VIP_HDF);
+
+ /* power down HD DAC */
+ tvout_write(tvout, 1, TVO_HD_DAC_CFG_OFF);
+}
+
+static const struct drm_encoder_helper_funcs sti_hda_encoder_helper_funcs = {
+ .dpms = sti_tvout_encoder_dpms,
+ .mode_fixup = sti_tvout_encoder_mode_fixup,
+ .mode_set = sti_tvout_encoder_mode_set,
+ .prepare = sti_tvout_encoder_prepare,
+ .commit = sti_hda_encoder_commit,
+ .disable = sti_hda_encoder_disable,
+};
+
+static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ struct sti_tvout_encoder *encoder;
+ struct drm_encoder *drm_encoder;
+
+ encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ encoder->tvout = tvout;
+
+ drm_encoder = &encoder->encoder;
+
+ drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_clones = 1 << 0;
+
+ drm_encoder_init(dev, drm_encoder,
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC);
+
+ drm_encoder_helper_add(drm_encoder, &sti_hda_encoder_helper_funcs);
+
+ return drm_encoder;
+}
+
+static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ tvout_hdmi_start(tvout, true);
+}
+
+static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct sti_tvout *tvout = to_sti_tvout(encoder);
+
+ /* reset VIP register */
+ tvout_write(tvout, 0x0, TVO_VIP_HDMI);
+}
+
+static const struct drm_encoder_helper_funcs sti_hdmi_encoder_helper_funcs = {
+ .dpms = sti_tvout_encoder_dpms,
+ .mode_fixup = sti_tvout_encoder_mode_fixup,
+ .mode_set = sti_tvout_encoder_mode_set,
+ .prepare = sti_tvout_encoder_prepare,
+ .commit = sti_hdmi_encoder_commit,
+ .disable = sti_hdmi_encoder_disable,
+};
+
+static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ struct sti_tvout_encoder *encoder;
+ struct drm_encoder *drm_encoder;
+
+ encoder = devm_kzalloc(tvout->dev, sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return NULL;
+
+ encoder->tvout = tvout;
+
+ drm_encoder = &encoder->encoder;
+
+ drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_clones = 1 << 1;
+
+ drm_encoder_init(dev, drm_encoder,
+ &sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS);
+
+ drm_encoder_helper_add(drm_encoder, &sti_hdmi_encoder_helper_funcs);
+
+ return drm_encoder;
+}
+
+static void sti_tvout_create_encoders(struct drm_device *dev,
+ struct sti_tvout *tvout)
+{
+ tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
+ tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
+}
+
+static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
+{
+ if (tvout->hdmi)
+ drm_encoder_cleanup(tvout->hdmi);
+ tvout->hdmi = NULL;
+
+ if (tvout->hda)
+ drm_encoder_cleanup(tvout->hda);
+ tvout->hda = NULL;
+}
+
+static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_tvout *tvout = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ unsigned int i;
+ int ret;
+
+ tvout->drm_dev = drm_dev;
+
+ /* set preformatter matrix */
+ for (i = 0; i < 8; i++) {
+ tvout_write(tvout, rgb_to_ycbcr_601[i],
+ TVO_CSC_MAIN_M0 + (i * 4));
+ tvout_write(tvout, rgb_to_ycbcr_601[i],
+ TVO_CSC_AUX_M0 + (i * 4));
+ }
+
+ sti_tvout_create_encoders(drm_dev, tvout);
+
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ sti_tvout_destroy_encoders(tvout);
+
+ return ret;
+}
+
+static void sti_tvout_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_tvout_ops = {
+ .bind = sti_tvout_bind,
+ .unbind = sti_tvout_unbind,
+};
+
+static int compare_of(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+static int sti_tvout_master_bind(struct device *dev)
+{
+ return 0;
+}
+
+static void sti_tvout_master_unbind(struct device *dev)
+{
+ /* do nothing */
+}
+
+static const struct component_master_ops sti_tvout_master_ops = {
+ .bind = sti_tvout_master_bind,
+ .unbind = sti_tvout_master_unbind,
+};
+
+static int sti_tvout_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct sti_tvout *tvout;
+ struct resource *res;
+ struct device_node *child_np;
+ struct component_match *match = NULL;
+
+ DRM_INFO("%s\n", __func__);
+
+ if (!node)
+ return -ENODEV;
+
+ tvout = devm_kzalloc(dev, sizeof(*tvout), GFP_KERNEL);
+ if (!tvout)
+ return -ENOMEM;
+
+ tvout->dev = dev;
+
+ /* get memory resources */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg");
+ if (!res) {
+ DRM_ERROR("Invalid glue resource\n");
+ return -ENOMEM;
+ }
+ tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!tvout->regs)
+ return -ENOMEM;
+
+ /* get reset resources */
+ tvout->reset = devm_reset_control_get(dev, "tvout");
+ /* take tvout out of reset */
+ if (!IS_ERR(tvout->reset))
+ reset_control_deassert(tvout->reset);
+
+ platform_set_drvdata(pdev, tvout);
+
+ of_platform_populate(node, NULL, NULL, dev);
+
+ child_np = of_get_next_available_child(node, NULL);
+
+ while (child_np) {
+ component_match_add(dev, &match, compare_of, child_np);
+ of_node_put(child_np);
+ child_np = of_get_next_available_child(node, child_np);
+ }
+
+ component_master_add_with_match(dev, &sti_tvout_master_ops, match);
+
+ return component_add(dev, &sti_tvout_ops);
+}
+
+static int sti_tvout_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &sti_tvout_master_ops);
+ component_del(&pdev->dev, &sti_tvout_ops);
+ return 0;
+}
+
+static const struct of_device_id tvout_of_match[] = {
+ { .compatible = "st,stih416-tvout", },
+ { .compatible = "st,stih407-tvout", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, tvout_of_match);
+
+struct platform_driver sti_tvout_driver = {
+ .driver = {
+ .name = "sti-tvout",
+ .owner = THIS_MODULE,
+ .of_match_table = tvout_of_match,
+ },
+ .probe = sti_tvout_probe,
+ .remove = sti_tvout_remove,
+};
+
+module_platform_driver(sti_tvout_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
new file mode 100644
index 000000000000..10ced6a479f4
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <drm/drmP.h>
+
+#include "sti_layer.h"
+#include "sti_vid.h"
+#include "sti_vtg.h"
+
+/* Registers */
+#define VID_CTL 0x00
+#define VID_ALP 0x04
+#define VID_CLF 0x08
+#define VID_VPO 0x0C
+#define VID_VPS 0x10
+#define VID_KEY1 0x28
+#define VID_KEY2 0x2C
+#define VID_MPR0 0x30
+#define VID_MPR1 0x34
+#define VID_MPR2 0x38
+#define VID_MPR3 0x3C
+#define VID_MST 0x68
+#define VID_BC 0x70
+#define VID_TINT 0x74
+#define VID_CSAT 0x78
+
+/* Registers values */
+#define VID_CTL_IGNORE (BIT(31) | BIT(30))
+#define VID_CTL_PSI_ENABLE (BIT(2) | BIT(1) | BIT(0))
+#define VID_ALP_OPAQUE 0x00000080
+#define VID_BC_DFLT 0x00008000
+#define VID_TINT_DFLT 0x00000000
+#define VID_CSAT_DFLT 0x00000080
+/*
+ * YCbCr to RGB BT709:
+ * R = Y + 1.5391 * Cr
+ * G = Y - 0.4590 * Cr - 0.1826 * Cb
+ * B = Y + 1.8125 * Cb
+ */
+#define VID_MPR0_BT709 0x0A800000
+#define VID_MPR1_BT709 0x0AC50000
+#define VID_MPR2_BT709 0x07150545
+#define VID_MPR3_BT709 0x00000AE8
+
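As a sanity check of the BT.709 equations documented above, a standalone sketch applying them to a mid-grey pixel (the fixed-point encoding of the VID_MPR register values themselves is not decoded here):

#include <stdio.h>

int main(void)
{
	/* BT.709 YCbCr -> RGB, offsets already removed from Y/Cb/Cr */
	double y = 0.5, cb = 0.0, cr = 0.0;

	double r = y + 1.5391 * cr;
	double g = y - 0.4590 * cr - 0.1826 * cb;
	double b = y + 1.8125 * cb;

	printf("R=%.4f G=%.4f B=%.4f\n", r, g, b);	/* all 0.5 for grey */
	return 0;
}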
+static int sti_vid_prepare_layer(struct sti_layer *vid, bool first_prepare)
+{
+ u32 val;
+
+ /* Unmask */
+ val = readl(vid->regs + VID_CTL);
+ val &= ~VID_CTL_IGNORE;
+ writel(val, vid->regs + VID_CTL);
+
+ return 0;
+}
+
+static int sti_vid_commit_layer(struct sti_layer *vid)
+{
+ struct drm_display_mode *mode = vid->mode;
+ u32 ydo, xdo, yds, xds;
+
+ ydo = sti_vtg_get_line_number(*mode, vid->dst_y);
+ yds = sti_vtg_get_line_number(*mode, vid->dst_y + vid->dst_h - 1);
+ xdo = sti_vtg_get_pixel_number(*mode, vid->dst_x);
+ xds = sti_vtg_get_pixel_number(*mode, vid->dst_x + vid->dst_w - 1);
+
+ writel((ydo << 16) | xdo, vid->regs + VID_VPO);
+ writel((yds << 16) | xds, vid->regs + VID_VPS);
+
+ return 0;
+}
+
+static int sti_vid_disable_layer(struct sti_layer *vid)
+{
+ u32 val;
+
+ /* Mask */
+ val = readl(vid->regs + VID_CTL);
+ val |= VID_CTL_IGNORE;
+ writel(val, vid->regs + VID_CTL);
+
+ return 0;
+}
+
+static const uint32_t *sti_vid_get_formats(struct sti_layer *layer)
+{
+ return NULL;
+}
+
+static unsigned int sti_vid_get_nb_formats(struct sti_layer *layer)
+{
+ return 0;
+}
+
+static void sti_vid_init(struct sti_layer *vid)
+{
+ /* Enable PSI, Mask layer */
+ writel(VID_CTL_PSI_ENABLE | VID_CTL_IGNORE, vid->regs + VID_CTL);
+
+ /* Opaque */
+ writel(VID_ALP_OPAQUE, vid->regs + VID_ALP);
+
+ /* Color conversion parameters */
+ writel(VID_MPR0_BT709, vid->regs + VID_MPR0);
+ writel(VID_MPR1_BT709, vid->regs + VID_MPR1);
+ writel(VID_MPR2_BT709, vid->regs + VID_MPR2);
+ writel(VID_MPR3_BT709, vid->regs + VID_MPR3);
+
+ /* Brightness, contrast, tint, saturation */
+ writel(VID_BC_DFLT, vid->regs + VID_BC);
+ writel(VID_TINT_DFLT, vid->regs + VID_TINT);
+ writel(VID_CSAT_DFLT, vid->regs + VID_CSAT);
+}
+
+static const struct sti_layer_funcs vid_ops = {
+ .get_formats = sti_vid_get_formats,
+ .get_nb_formats = sti_vid_get_nb_formats,
+ .init = sti_vid_init,
+ .prepare = sti_vid_prepare_layer,
+ .commit = sti_vid_commit_layer,
+ .disable = sti_vid_disable_layer,
+};
+
+struct sti_layer *sti_vid_create(struct device *dev)
+{
+ struct sti_layer *vid;
+
+ vid = devm_kzalloc(dev, sizeof(*vid), GFP_KERNEL);
+ if (!vid) {
+ DRM_ERROR("Failed to allocate memory for VID\n");
+ return NULL;
+ }
+
+ vid->ops = &vid_ops;
+
+ return vid;
+}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
new file mode 100644
index 000000000000..2c0aecd63294
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_VID_H_
+#define _STI_VID_H_
+
+struct sti_layer *sti_vid_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
new file mode 100644
index 000000000000..82a51d488434
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtac.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+/* registers offset */
+#define VTAC_CONFIG 0x00
+#define VTAC_RX_FIFO_CONFIG 0x04
+#define VTAC_FIFO_CONFIG_VAL 0x04
+
+#define VTAC_SYS_CFG8521 0x824
+#define VTAC_SYS_CFG8522 0x828
+
+/* Number of phyts per pixel */
+#define VTAC_2_5_PPP 0x0005
+#define VTAC_3_PPP 0x0006
+#define VTAC_4_PPP 0x0008
+#define VTAC_5_PPP 0x000A
+#define VTAC_6_PPP 0x000C
+#define VTAC_13_PPP 0x001A
+#define VTAC_14_PPP 0x001C
+#define VTAC_15_PPP 0x001E
+#define VTAC_16_PPP 0x0020
+#define VTAC_17_PPP 0x0022
+#define VTAC_18_PPP 0x0024
+
+/* enable bits */
+#define VTAC_ENABLE 0x3003
+
+#define VTAC_TX_PHY_ENABLE_CLK_PHY BIT(0)
+#define VTAC_TX_PHY_ENABLE_CLK_DLL BIT(1)
+#define VTAC_TX_PHY_PLL_NOT_OSC_MODE BIT(3)
+#define VTAC_TX_PHY_RST_N_DLL_SWITCH BIT(4)
+#define VTAC_TX_PHY_PROG_N3 BIT(9)
+
+
+/**
+ * VTAC mode structure
+ *
+ * @vid_in_width: Video Data Resolution
+ * @phyts_width: Width of the phyt buses (phyt low and phyt high)
+ * @phyts_per_pixel: Number of phyts sent per pixel
+ */
+struct sti_vtac_mode {
+ u32 vid_in_width;
+ u32 phyts_width;
+ u32 phyts_per_pixel;
+};
+
+static const struct sti_vtac_mode vtac_mode_main = {0x2, 0x2, VTAC_5_PPP};
+static const struct sti_vtac_mode vtac_mode_aux = {0x1, 0x0, VTAC_17_PPP};
+
+/**
+ * VTAC structure
+ *
+ * @dev: pointer to device structure
+ * @regs: ioremapped registers for RX and TX devices
+ * @phy_regs: phy registers for TX device
+ * @clk: clock
+ * @mode: main or auxiliary configuration mode
+ */
+struct sti_vtac {
+ struct device *dev;
+ void __iomem *regs;
+ void __iomem *phy_regs;
+ struct clk *clk;
+ const struct sti_vtac_mode *mode;
+};
+
+static void sti_vtac_rx_set_config(struct sti_vtac *vtac)
+{
+ u32 config;
+
+ /* Enable VTAC clock */
+ if (clk_prepare_enable(vtac->clk))
+ DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n");
+
+ writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG);
+
+ config = VTAC_ENABLE;
+ config |= vtac->mode->vid_in_width << 4;
+ config |= vtac->mode->phyts_width << 16;
+ config |= vtac->mode->phyts_per_pixel << 23;
+ writel(config, vtac->regs + VTAC_CONFIG);
+}
+
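Both the RX and TX paths pack the mode parameters into VTAC_CONFIG around the enable bits: video input width at bit 4, phyt bus width at bit 16 and phyts-per-pixel at bit 23. A standalone sketch for the main mode (0x2, 0x2, VTAC_5_PPP = 0x000A); the resulting constant is derived from the shifts in this file, not from a datasheet:

#include <stdio.h>
#include <stdint.h>

#define VTAC_ENABLE 0x3003

int main(void)
{
	uint32_t config = VTAC_ENABLE;

	config |= 0x2u << 4;	/* vid_in_width */
	config |= 0x2u << 16;	/* phyts_width */
	config |= 0xAu << 23;	/* phyts_per_pixel (VTAC_5_PPP) */
	printf("VTAC_CONFIG = 0x%08x\n", config);	/* 0x05023023 */
	return 0;
}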
+static void sti_vtac_tx_set_config(struct sti_vtac *vtac)
+{
+ u32 phy_config;
+ u32 config;
+
+ /* Enable VTAC clock */
+ if (clk_prepare_enable(vtac->clk))
+ DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n");
+
+ /* Configure vtac phy */
+ phy_config = 0x00000000;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522);
+ phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_PROG_N3;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_ENABLE_CLK_DLL;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
+ phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE;
+ writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
+
+ /* Configure vtac tx */
+ config = VTAC_ENABLE;
+ config |= vtac->mode->vid_in_width << 4;
+ config |= vtac->mode->phyts_width << 16;
+ config |= vtac->mode->phyts_per_pixel << 23;
+ writel(config, vtac->regs + VTAC_CONFIG);
+}
+
+static const struct of_device_id vtac_of_match[] = {
+ {
+ .compatible = "st,vtac-main",
+ .data = &vtac_mode_main,
+ }, {
+ .compatible = "st,vtac-aux",
+ .data = &vtac_mode_aux,
+ }, {
+ /* end node */
+ }
+};
+MODULE_DEVICE_TABLE(of, vtac_of_match);
+
+static int sti_vtac_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct of_device_id *id;
+ struct sti_vtac *vtac;
+ struct resource *res;
+
+ vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL);
+ if (!vtac)
+ return -ENOMEM;
+
+ vtac->dev = dev;
+
+ id = of_match_node(vtac_of_match, np);
+ if (!id)
+ return -ENOMEM;
+
+ vtac->mode = id->data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("Invalid resource\n");
+ return -ENOMEM;
+ }
+ vtac->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(vtac->regs))
+ return PTR_ERR(vtac->regs);
+
+ vtac->clk = devm_clk_get(dev, "vtac");
+ if (IS_ERR(vtac->clk)) {
+ DRM_ERROR("Cannot get vtac clock\n");
+ return PTR_ERR(vtac->clk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ vtac->phy_regs = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ if (!vtac->phy_regs)
+ return -ENOMEM;
+ sti_vtac_tx_set_config(vtac);
+ } else {
+ sti_vtac_rx_set_config(vtac);
+ }
+
+ platform_set_drvdata(pdev, vtac);
+ DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev));
+
+ return 0;
+}
+
+static int sti_vtac_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+struct platform_driver sti_vtac_driver = {
+ .driver = {
+ .name = "sti-vtac",
+ .owner = THIS_MODULE,
+ .of_match_table = vtac_of_match,
+ },
+ .probe = sti_vtac_probe,
+ .remove = sti_vtac_remove,
+};
+
+module_platform_driver(sti_vtac_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
new file mode 100644
index 000000000000..740d6e347a62
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Benjamin Gaignard <benjamin.gaignard@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * Vincent Abriou <vincent.abriou@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+
+#include <drm/drmP.h>
+
+#include "sti_vtg.h"
+
+#define VTG_TYPE_MASTER 0
+#define VTG_TYPE_SLAVE_BY_EXT0 1
+
+/* registers offset */
+#define VTG_MODE 0x0000
+#define VTG_CLKLN 0x0008
+#define VTG_HLFLN 0x000C
+#define VTG_DRST_AUTOC 0x0010
+#define VTG_VID_TFO 0x0040
+#define VTG_VID_TFS 0x0044
+#define VTG_VID_BFO 0x0048
+#define VTG_VID_BFS 0x004C
+
+#define VTG_HOST_ITS 0x0078
+#define VTG_HOST_ITS_BCLR 0x007C
+#define VTG_HOST_ITM_BCLR 0x0088
+#define VTG_HOST_ITM_BSET 0x008C
+
+#define VTG_H_HD_1 0x00C0
+#define VTG_TOP_V_VD_1 0x00C4
+#define VTG_BOT_V_VD_1 0x00C8
+#define VTG_TOP_V_HD_1 0x00CC
+#define VTG_BOT_V_HD_1 0x00D0
+
+#define VTG_H_HD_2 0x00E0
+#define VTG_TOP_V_VD_2 0x00E4
+#define VTG_BOT_V_VD_2 0x00E8
+#define VTG_TOP_V_HD_2 0x00EC
+#define VTG_BOT_V_HD_2 0x00F0
+
+#define VTG_H_HD_3 0x0100
+#define VTG_TOP_V_VD_3 0x0104
+#define VTG_BOT_V_VD_3 0x0108
+#define VTG_TOP_V_HD_3 0x010C
+#define VTG_BOT_V_HD_3 0x0110
+
+#define VTG_IRQ_BOTTOM BIT(0)
+#define VTG_IRQ_TOP BIT(1)
+#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
+
+/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
+#define AWG_DELAY_HD (-9)
+#define AWG_DELAY_ED (-8)
+#define AWG_DELAY_SD (-7)
+
+static LIST_HEAD(vtg_lookup);
+
+/**
+ * STI VTG structure
+ *
+ * @dev: pointer to the device driver
+ * @np: device node associated with the device
+ * @regs: VTG registers
+ * @irq: VTG irq
+ * @irq_status: IRQ status saved by the hard IRQ handler
+ * @notifier_list: notifier callback
+ * @crtc_id: the crtc id for vblank event
+ * @slave: slave vtg
+ * @link: list node to link the structure in the lookup list
+ */
+struct sti_vtg {
+ struct device *dev;
+ struct device_node *np;
+ void __iomem *regs;
+ int irq;
+ u32 irq_status;
+ struct raw_notifier_head notifier_list;
+ int crtc_id;
+ struct sti_vtg *slave;
+ struct list_head link;
+};
+
+static void vtg_register(struct sti_vtg *vtg)
+{
+ list_add_tail(&vtg->link, &vtg_lookup);
+}
+
+struct sti_vtg *of_vtg_find(struct device_node *np)
+{
+ struct sti_vtg *vtg;
+
+ list_for_each_entry(vtg, &vtg_lookup, link) {
+ if (vtg->np == np)
+ return vtg;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_vtg_find);
+
+static void vtg_reset(struct sti_vtg *vtg)
+{
+ /* reset slave and then master */
+ if (vtg->slave)
+ vtg_reset(vtg->slave);
+
+ writel(1, vtg->regs + VTG_DRST_AUTOC);
+}
+
+static void vtg_set_mode(struct sti_vtg *vtg,
+ int type, const struct drm_display_mode *mode)
+{
+ u32 tmp;
+
+ if (vtg->slave)
+ vtg_set_mode(vtg->slave, VTG_TYPE_SLAVE_BY_EXT0, mode);
+
+ writel(mode->htotal, vtg->regs + VTG_CLKLN);
+ writel(mode->vtotal * 2, vtg->regs + VTG_HLFLN);
+
+ tmp = (mode->vtotal - mode->vsync_start + 1) << 16;
+ tmp |= mode->htotal - mode->hsync_start;
+ writel(tmp, vtg->regs + VTG_VID_TFO);
+ writel(tmp, vtg->regs + VTG_VID_BFO);
+
+ tmp = (mode->vdisplay + mode->vtotal - mode->vsync_start + 1) << 16;
+ tmp |= mode->hdisplay + mode->htotal - mode->hsync_start;
+ writel(tmp, vtg->regs + VTG_VID_TFS);
+ writel(tmp, vtg->regs + VTG_VID_BFS);
+
+ /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_1);
+ writel(tmp, vtg->regs + VTG_H_HD_2);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_1);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_1);
+ writel(0, vtg->regs + VTG_TOP_V_HD_1);
+ writel(0, vtg->regs + VTG_BOT_V_HD_1);
+
+ /* prepare VTG set 2 for HD DCS */
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
+ writel(0, vtg->regs + VTG_TOP_V_HD_2);
+ writel(0, vtg->regs + VTG_BOT_V_HD_2);
+
+ /* prepare VTG set 3 for HD Analog in HD mode */
+ tmp = (mode->hsync_end - mode->hsync_start + AWG_DELAY_HD) << 16;
+ tmp |= mode->htotal + AWG_DELAY_HD;
+ writel(tmp, vtg->regs + VTG_H_HD_3);
+
+ tmp = (mode->vsync_end - mode->vsync_start) << 16;
+ tmp |= mode->vtotal;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_3);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_3);
+
+ tmp = (mode->htotal + AWG_DELAY_HD) << 16;
+ tmp |= mode->htotal + AWG_DELAY_HD;
+ writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
+ writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
+
+ /* mode */
+ writel(type, vtg->regs + VTG_MODE);
+}
+
+static void vtg_enable_irq(struct sti_vtg *vtg)
+{
+ /* clear interrupt status and mask */
+ writel(0xFFFF, vtg->regs + VTG_HOST_ITS_BCLR);
+ writel(0xFFFF, vtg->regs + VTG_HOST_ITM_BCLR);
+ writel(VTG_IRQ_MASK, vtg->regs + VTG_HOST_ITM_BSET);
+}
+
+void sti_vtg_set_config(struct sti_vtg *vtg,
+ const struct drm_display_mode *mode)
+{
+ /* write configuration */
+ vtg_set_mode(vtg, VTG_TYPE_MASTER, mode);
+
+ vtg_reset(vtg);
+
+ /* enable irq for the vtg vblank synchro */
+ if (vtg->slave)
+ vtg_enable_irq(vtg->slave);
+ else
+ vtg_enable_irq(vtg);
+}
+EXPORT_SYMBOL(sti_vtg_set_config);
+
+/**
+ * sti_vtg_get_line_number
+ *
+ * @mode: display mode to be used
+ * @y: line
+ *
+ * Return the line number according to the display mode taking
+ * into account the Sync and Back Porch information.
+ * Video frame line numbers start at 1, y starts at 0.
+ * In interlaced modes the start line is the field line number of the odd
+ * field, but y is still defined as a progressive frame.
+ */
+u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y)
+{
+ u32 start_line = mode.vtotal - mode.vsync_start + 1;
+
+ if (mode.flags & DRM_MODE_FLAG_INTERLACE)
+ start_line *= 2;
+
+ return start_line + y;
+}
+EXPORT_SYMBOL(sti_vtg_get_line_number);
+
+/**
+ * sti_vtg_get_pixel_number
+ *
+ * @mode: display mode to be used
+ * @x: row
+ *
+ * Return the pixel number according to the display mode taking
+ * into account the Sync and Back Porch information.
+ * Pixels are counted from 0.
+ */
+u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x)
+{
+ return mode.htotal - mode.hsync_start + x;
+}
+EXPORT_SYMBOL(sti_vtg_get_pixel_number);
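As a worked example of the two helpers above, take the standard CEA 1080p60 timings (htotal 2200, hsync_start 2008, vtotal 1125, vsync_start 1084): sti_vtg_get_line_number(mode, 0) yields 1125 - 1084 + 1 = 42 and sti_vtg_get_pixel_number(mode, 0) yields 2200 - 2008 = 192, placing the first active pixel 42 lines and 192 pixels after the sync point. A standalone sketch of the same arithmetic:

#include <stdio.h>

struct mode { int htotal, hsync_start, vtotal, vsync_start; };

static int line_number(struct mode m, int y)
{
	return m.vtotal - m.vsync_start + 1 + y;
}

static int pixel_number(struct mode m, int x)
{
	return m.htotal - m.hsync_start + x;
}

int main(void)
{
	struct mode m1080p60 = { 2200, 2008, 1125, 1084 };

	/* first active pixel of the frame */
	printf("ydo=%d xdo=%d\n", line_number(m1080p60, 0),
	       pixel_number(m1080p60, 0));	/* ydo=42 xdo=192 */
	return 0;
}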
+
+int sti_vtg_register_client(struct sti_vtg *vtg,
+ struct notifier_block *nb, int crtc_id)
+{
+ if (vtg->slave)
+ return sti_vtg_register_client(vtg->slave, nb, crtc_id);
+
+ vtg->crtc_id = crtc_id;
+ return raw_notifier_chain_register(&vtg->notifier_list, nb);
+}
+EXPORT_SYMBOL(sti_vtg_register_client);
+
+int sti_vtg_unregister_client(struct sti_vtg *vtg, struct notifier_block *nb)
+{
+ if (vtg->slave)
+ return sti_vtg_unregister_client(vtg->slave, nb);
+
+ return raw_notifier_chain_unregister(&vtg->notifier_list, nb);
+}
+EXPORT_SYMBOL(sti_vtg_unregister_client);
+
+static irqreturn_t vtg_irq_thread(int irq, void *arg)
+{
+ struct sti_vtg *vtg = arg;
+ u32 event;
+
+ event = (vtg->irq_status & VTG_IRQ_TOP) ?
+ VTG_TOP_FIELD_EVENT : VTG_BOTTOM_FIELD_EVENT;
+
+ raw_notifier_call_chain(&vtg->notifier_list, event, &vtg->crtc_id);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vtg_irq(int irq, void *arg)
+{
+ struct sti_vtg *vtg = arg;
+
+ vtg->irq_status = readl(vtg->regs + VTG_HOST_ITS);
+
+ writel(vtg->irq_status, vtg->regs + VTG_HOST_ITS_BCLR);
+
+ /* force sync bus write */
+ readl(vtg->regs + VTG_HOST_ITS);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int vtg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct sti_vtg *vtg;
+ struct resource *res;
+ char irq_name[32];
+ int ret;
+
+ vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL);
+ if (!vtg)
+ return -ENOMEM;
+
+ vtg->dev = dev;
+ vtg->np = pdev->dev.of_node;
+
+ /* get memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("Failed to get memory resource\n");
+ return -ENOMEM;
+ }
+ vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!vtg->regs)
+ return -ENOMEM;
+
+ np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
+ if (np) {
+ vtg->slave = of_vtg_find(np);
+
+ if (!vtg->slave)
+ return -EPROBE_DEFER;
+ } else {
+ vtg->irq = platform_get_irq(pdev, 0);
+ if (vtg->irq < 0) {
+ DRM_ERROR("Failed to get VTG interrupt\n");
+ return vtg->irq;
+ }
+
+ snprintf(irq_name, sizeof(irq_name), "vsync-%s",
+ dev_name(vtg->dev));
+
+ RAW_INIT_NOTIFIER_HEAD(&vtg->notifier_list);
+
+ ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
+ vtg_irq_thread, IRQF_ONESHOT, irq_name, vtg);
+ if (ret < 0) {
+ DRM_ERROR("Failed to register VTG interrupt\n");
+ return ret;
+ }
+ }
+
+ vtg_register(vtg);
+ platform_set_drvdata(pdev, vtg);
+
+ DRM_INFO("%s %s\n", __func__, dev_name(vtg->dev));
+
+ return 0;
+}
+
+static int vtg_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id vtg_of_match[] = {
+ { .compatible = "st,vtg", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vtg_of_match);
+
+struct platform_driver sti_vtg_driver = {
+ .driver = {
+ .name = "sti-vtg",
+ .owner = THIS_MODULE,
+ .of_match_table = vtg_of_match,
+ },
+ .probe = vtg_probe,
+ .remove = vtg_remove,
+};
+
+module_platform_driver(sti_vtg_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_vtg.h b/drivers/gpu/drm/sti/sti_vtg.h
new file mode 100644
index 000000000000..e84d23f1f57f
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_vtg.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_VTG_H_
+#define _STI_VTG_H_
+
+#define VTG_TOP_FIELD_EVENT 1
+#define VTG_BOTTOM_FIELD_EVENT 2
+
+struct sti_vtg;
+struct drm_display_mode;
+struct notifier_block;
+
+struct sti_vtg *of_vtg_find(struct device_node *np);
+void sti_vtg_set_config(struct sti_vtg *vtg,
+ const struct drm_display_mode *mode);
+int sti_vtg_register_client(struct sti_vtg *vtg,
+ struct notifier_block *nb, int crtc_id);
+int sti_vtg_unregister_client(struct sti_vtg *vtg,
+ struct notifier_block *nb);
+
+u32 sti_vtg_get_line_number(struct drm_display_mode mode, int y);
+u32 sti_vtg_get_pixel_number(struct drm_display_mode mode, int x);
+
+#endif
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index ef40381f3909..6553fd238685 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -18,6 +18,8 @@
struct tegra_dc_soc_info {
bool supports_interlacing;
bool supports_cursor;
+ bool supports_block_linear;
+ unsigned int pitch_align;
};
struct tegra_plane {
@@ -212,15 +214,44 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
- if (window->tiled) {
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = window->tiling.value;
+
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
} else {
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- }
+ switch (window->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ break;
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
+
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ }
value = WIN_ENABLE;
@@ -288,6 +319,7 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct tegra_dc *dc = to_tegra_dc(crtc);
struct tegra_dc_window window;
unsigned int i;
+ int err;
memset(&window, 0, sizeof(window));
window.src.x = src_x >> 16;
@@ -301,7 +333,10 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
window.format = tegra_dc_format(fb->pixel_format, &window.swap);
window.bits_per_pixel = fb->bits_per_pixel;
window.bottom_up = tegra_fb_is_bottom_up(fb);
- window.tiled = tegra_fb_is_tiled(fb);
+
+ err = tegra_fb_get_tiling(fb, &window.tiling);
+ if (err < 0)
+ return err;
for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -402,8 +437,14 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
{
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
+ struct tegra_bo_tiling tiling;
unsigned int format, swap;
unsigned long value;
+ int err;
+
+ err = tegra_fb_get_tiling(fb, &tiling);
+ if (err < 0)
+ return err;
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -417,15 +458,44 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
tegra_dc_writel(dc, swap, DC_WIN_BYTE_SWAP);
- if (tegra_fb_is_tiled(fb)) {
- value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
- DC_WIN_BUFFER_ADDR_MODE_TILE;
+ if (dc->soc->supports_block_linear) {
+ unsigned long height = tiling.value;
+
+ switch (tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WINBUF_SURFACE_KIND_PITCH;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WINBUF_SURFACE_KIND_TILED;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
+ DC_WINBUF_SURFACE_KIND_BLOCK;
+ break;
+ }
+
+ tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);
} else {
- value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
- DC_WIN_BUFFER_ADDR_MODE_LINEAR;
- }
+ switch (tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+ DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+ DC_WIN_BUFFER_ADDR_MODE_TILE;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ DRM_ERROR("hardware doesn't support block linear mode\n");
+ return -EINVAL;
+ }
- tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+ }
/* make sure bottom-up buffers are properly displayed */
if (tegra_fb_is_bottom_up(fb)) {
@@ -1214,12 +1284,20 @@ static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_dc *dc = host1x_client_to_dc(client);
+ struct tegra_drm *tegra = drm->dev_private;
int err;
drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
+ /*
+ * Track the strictest (largest) pitch alignment required by any of
+ * the display controllers so that buffers satisfy all of them.
+ */
+ if (dc->soc->pitch_align > tegra->pitch_align)
+ tegra->pitch_align = dc->soc->pitch_align;
+
err = tegra_dc_rgb_init(drm, dc);
if (err < 0 && err != -ENODEV) {
dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
@@ -1277,16 +1355,29 @@ static const struct host1x_client_ops dc_client_ops = {
static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 8,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.supports_interlacing = false,
.supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 8,
+};
+
+static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
+ .supports_interlacing = false,
+ .supports_cursor = false,
+ .supports_block_linear = false,
+ .pitch_align = 64,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.supports_interlacing = true,
.supports_cursor = true,
+ .supports_block_linear = true,
+ .pitch_align = 64,
};
static const struct of_device_id tegra_dc_of_match[] = {
@@ -1303,6 +1394,7 @@ static const struct of_device_id tegra_dc_of_match[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, tegra_dc_of_match);
static int tegra_dc_parse_dt(struct tegra_dc *dc)
{
@@ -1430,6 +1522,7 @@ static int tegra_dc_remove(struct platform_device *pdev)
return err;
}
+ reset_control_assert(dc->rst);
clk_disable_unprepare(dc->clk);
return 0;
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 78c5feff95d2..705c93b00794 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -428,6 +428,11 @@
#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
#define DC_WINBUF_UFLOW_STATUS 0x80a
+#define DC_WINBUF_SURFACE_KIND 0x80b
+#define DC_WINBUF_SURFACE_KIND_PITCH (0 << 0)
+#define DC_WINBUF_SURFACE_KIND_TILED (1 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK (2 << 0)
+#define DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(x) (((x) & 0x7) << 4)
#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
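For illustration, the new surface-kind bits compose as follows; the block height of 4 below is a hypothetical value (the GEM ioctl added later in this series rejects anything above 5):

	/* illustrative only: block-linear surface, block height log2 = 4 */
	u32 value = DC_WINBUF_SURFACE_KIND_BLOCK |
		    DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(4);
	/* value == (2 << 0) | ((4 & 0x7) << 4) == 0x42 */
	tegra_dc_writel(dc, value, DC_WINBUF_SURFACE_KIND);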
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 3f132e356e9c..708f783ead47 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -382,6 +382,7 @@ static const struct of_device_id tegra_dpaux_of_match[] = {
{ .compatible = "nvidia,tegra124-dpaux", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
struct platform_driver tegra_dpaux_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 3396f9f6a9f7..59736bb810cd 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -40,6 +40,12 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm_mode_config_init(drm);
+ err = tegra_drm_fb_prepare(drm);
+ if (err < 0)
+ return err;
+
+ drm_kms_helper_poll_init(drm);
+
err = host1x_device_init(device);
if (err < 0)
return err;
@@ -59,8 +65,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (err < 0)
return err;
- drm_kms_helper_poll_init(drm);
-
return 0;
}
@@ -128,6 +132,45 @@ host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
return &bo->base;
}
+static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
+ struct drm_tegra_reloc __user *src,
+ struct drm_device *drm,
+ struct drm_file *file)
+{
+ u32 cmdbuf, target;
+ int err;
+
+ err = get_user(cmdbuf, &src->cmdbuf.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(target, &src->target.handle);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->target.offset, &src->target.offset);
+ if (err < 0)
+ return err;
+
+ err = get_user(dest->shift, &src->shift);
+ if (err < 0)
+ return err;
+
+ dest->cmdbuf.bo = host1x_bo_lookup(drm, file, cmdbuf);
+ if (!dest->cmdbuf.bo)
+ return -ENOENT;
+
+ dest->target.bo = host1x_bo_lookup(drm, file, target);
+ if (!dest->target.bo)
+ return -ENOENT;
+
+ return 0;
+}
+
int tegra_drm_submit(struct tegra_drm_context *context,
struct drm_tegra_submit *args, struct drm_device *drm,
struct drm_file *file)
@@ -180,26 +223,13 @@ int tegra_drm_submit(struct tegra_drm_context *context,
cmdbufs++;
}
- if (copy_from_user(job->relocarray, relocs,
- sizeof(*relocs) * num_relocs)) {
- err = -EFAULT;
- goto fail;
- }
-
+ /* copy and resolve relocations from submit */
while (num_relocs--) {
- struct host1x_reloc *reloc = &job->relocarray[num_relocs];
- struct host1x_bo *cmdbuf, *target;
-
- cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
- target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
- reloc->cmdbuf = cmdbuf;
- reloc->target = target;
-
- if (!reloc->target || !reloc->cmdbuf) {
- err = -ENOENT;
+ err = host1x_reloc_copy_from_user(&job->relocarray[num_relocs],
+ &relocs[num_relocs], drm,
+ file);
+ if (err < 0)
goto fail;
- }
}
if (copy_from_user(job->waitchk, waitchks,
@@ -451,11 +481,151 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
return 0;
}
+
+static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_tiling *args = data;
+ enum tegra_bo_tiling_mode mode;
+ struct drm_gem_object *gem;
+ unsigned long value = 0;
+ struct tegra_bo *bo;
+
+ switch (args->mode) {
+ case DRM_TEGRA_GEM_TILING_MODE_PITCH:
+ mode = TEGRA_BO_TILING_MODE_PITCH;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_TILED:
+ mode = TEGRA_BO_TILING_MODE_TILED;
+
+ if (args->value != 0)
+ return -EINVAL;
+
+ break;
+
+ case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
+ mode = TEGRA_BO_TILING_MODE_BLOCK;
+
+ if (args->value > 5)
+ return -EINVAL;
+
+ value = args->value;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ bo->tiling.mode = mode;
+ bo->tiling.value = value;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_tiling *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+ int err = 0;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+
+ switch (bo->tiling.mode) {
+ case TEGRA_BO_TILING_MODE_PITCH:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_TILED:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
+ args->value = 0;
+ break;
+
+ case TEGRA_BO_TILING_MODE_BLOCK:
+ args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
+ args->value = bo->tiling.value;
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ drm_gem_object_unreference(gem);
+
+ return err;
+}
+
+static int tegra_gem_set_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_set_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
+ return -EINVAL;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ bo->flags = 0;
+
+ if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
+ bo->flags |= TEGRA_BO_BOTTOM_UP;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
+
+static int tegra_gem_get_flags(struct drm_device *drm, void *data,
+ struct drm_file *file)
+{
+ struct drm_tegra_gem_get_flags *args = data;
+ struct drm_gem_object *gem;
+ struct tegra_bo *bo;
+
+ gem = drm_gem_object_lookup(drm, file, args->handle);
+ if (!gem)
+ return -ENOENT;
+
+ bo = to_tegra_bo(gem);
+ args->flags = 0;
+
+ if (bo->flags & TEGRA_BO_BOTTOM_UP)
+ args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
+
+ drm_gem_object_unreference(gem);
+
+ return 0;
+}
#endif
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
- DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr, DRM_UNLOCKED),
@@ -465,6 +635,10 @@ static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags, DRM_UNLOCKED),
#endif
};
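A minimal user-space sketch of the new tiling interface, assuming the matching drm_tegra_gem_set_tiling layout and DRM_IOCTL_TEGRA_GEM_SET_TILING request number from the updated <drm/tegra_drm.h> UAPI header (error handling trimmed):

	#include <sys/ioctl.h>
	#include <drm/tegra_drm.h>

	/* Mark a GEM buffer as block linear; the handler above caps the
	 * log2 block height at 5, i.e. at most 32 GOBs. */
	static int bo_set_block_linear(int fd, __u32 handle, __u32 log2_height)
	{
		struct drm_tegra_gem_set_tiling args = {
			.handle = handle,
			.mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK,
			.value = log2_height,	/* rejected if > 5 */
		};

		return ioctl(fd, DRM_IOCTL_TEGRA_GEM_SET_TILING, &args);
	}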
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 6b8fe9d86ed4..e89c70fa82d5 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -19,6 +19,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
+#include "gem.h"
+
struct reset_control;
struct tegra_fb {
@@ -43,6 +45,8 @@ struct tegra_drm {
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_fbdev *fbdev;
#endif
+
+ unsigned int pitch_align;
};
struct tegra_drm_client;
@@ -160,7 +164,8 @@ struct tegra_dc_window {
unsigned int stride[2];
unsigned long base[3];
bool bottom_up;
- bool tiled;
+
+ struct tegra_bo_tiling tiling;
};
/* from dc.c */
@@ -279,7 +284,9 @@ int tegra_dpaux_train(struct tegra_dpaux *dpaux, struct drm_dp_link *link,
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
unsigned int index);
bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
-bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling);
+int tegra_drm_fb_prepare(struct drm_device *drm);
int tegra_drm_fb_init(struct drm_device *drm);
void tegra_drm_fb_exit(struct drm_device *drm);
#ifdef CONFIG_DRM_TEGRA_FBDEV
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index bd56f2affa78..f7874458926a 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -474,7 +474,8 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
value = tegra_dsi_readl(dsi, DSI_CONTROL);
- value |= DSI_CONTROL_HS_CLK_CTRL;
+ if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+ value |= DSI_CONTROL_HS_CLK_CTRL;
value &= ~DSI_CONTROL_TX_TRIG(3);
value &= ~DSI_CONTROL_DCS_ENABLE;
value |= DSI_CONTROL_VIDEO_ENABLE;
@@ -982,6 +983,7 @@ static const struct of_device_id tegra_dsi_of_match[] = {
{ .compatible = "nvidia,tegra114-dsi", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_dsi_of_match);
struct platform_driver tegra_dsi_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 9798a7080322..3513d12d5aa1 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -46,14 +46,15 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
return false;
}
-bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
+ struct tegra_bo_tiling *tiling)
{
struct tegra_fb *fb = to_tegra_fb(framebuffer);
- if (fb->planes[0]->flags & TEGRA_BO_TILED)
- return true;
+ /* TODO: handle YUV formats? */
+ *tiling = fb->planes[0]->tiling;
- return false;
+ return 0;
}
static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
@@ -193,6 +194,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct tegra_fbdev *fbdev = to_tegra_fbdev(helper);
+ struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
struct drm_mode_fb_cmd2 cmd = { 0 };
unsigned int bytes_per_pixel;
@@ -207,7 +209,8 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
cmd.width = sizes->surface_width;
cmd.height = sizes->surface_height;
- cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ cmd.pitches[0] = round_up(sizes->surface_width * bytes_per_pixel,
+ tegra->pitch_align);
cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
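Worked example of the rounding: for a hypothetical 1366-pixel-wide XRGB8888 surface on Tegra114 (pitch_align = 64), the pitch grows from 5464 to 5504 bytes:

	/* round_up(1366 * 4, 64) == ((5464 + 63) & ~63UL) == 5504 */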
@@ -267,18 +270,13 @@ release:
return err;
}
-static struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs tegra_fb_helper_funcs = {
.fb_probe = tegra_fbdev_probe,
};
-static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
- unsigned int preferred_bpp,
- unsigned int num_crtc,
- unsigned int max_connectors)
+static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
{
- struct drm_fb_helper *helper;
struct tegra_fbdev *fbdev;
- int err;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev) {
@@ -286,13 +284,23 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
return ERR_PTR(-ENOMEM);
}
- fbdev->base.funcs = &tegra_fb_helper_funcs;
- helper = &fbdev->base;
+ drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs);
+
+ return fbdev;
+}
+
+static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
+ unsigned int preferred_bpp,
+ unsigned int num_crtc,
+ unsigned int max_connectors)
+{
+ struct drm_device *drm = fbdev->base.dev;
+ int err;
err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
if (err < 0) {
dev_err(drm->dev, "failed to initialize DRM FB helper\n");
- goto free;
+ return err;
}
err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
@@ -301,21 +309,17 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm,
goto fini;
}
- drm_helper_disable_unused_functions(drm);
-
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
dev_err(drm->dev, "failed to set initial configuration\n");
goto fini;
}
- return fbdev;
+ return 0;
fini:
drm_fb_helper_fini(&fbdev->base);
-free:
- kfree(fbdev);
- return ERR_PTR(err);
+ return err;
}
static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
@@ -366,7 +370,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
#endif
};
-int tegra_drm_fb_init(struct drm_device *drm)
+int tegra_drm_fb_prepare(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
@@ -381,8 +385,7 @@ int tegra_drm_fb_init(struct drm_device *drm)
drm->mode_config.funcs = &tegra_drm_mode_funcs;
#ifdef CONFIG_DRM_TEGRA_FBDEV
- tegra->fbdev = tegra_fbdev_create(drm, 32, drm->mode_config.num_crtc,
- drm->mode_config.num_connector);
+ tegra->fbdev = tegra_fbdev_create(drm);
if (IS_ERR(tegra->fbdev))
return PTR_ERR(tegra->fbdev);
#endif
@@ -390,6 +393,21 @@ int tegra_drm_fb_init(struct drm_device *drm)
return 0;
}
+int tegra_drm_fb_init(struct drm_device *drm)
+{
+#ifdef CONFIG_DRM_TEGRA_FBDEV
+ struct tegra_drm *tegra = drm->dev_private;
+ int err;
+
+ err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc,
+ drm->mode_config.num_connector);
+ if (err < 0)
+ return err;
+#endif
+
+ return 0;
+}
+
void tegra_drm_fb_exit(struct drm_device *drm)
{
#ifdef CONFIG_DRM_TEGRA_FBDEV
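Putting the fb.c and drm.c hunks together, fbdev setup becomes two-stage; a rough sketch of the resulting order in tegra_drm_load(), with error handling elided (the final tegra_drm_fb_init() call sits later in the load path, outside the hunks shown):

	drm_mode_config_init(drm);
	tegra_drm_fb_prepare(drm);     /* allocate fbdev helper, set mode_config hooks */
	drm_kms_helper_poll_init(drm); /* safe now: helper prepared, no fb yet */
	host1x_device_init(device);    /* subdevices register CRTCs and outputs */
	/* ... */
	tegra_drm_fb_init(drm);        /* build the initial fbdev configuration */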
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 78cc8143760a..ce023fa3e8ae 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -16,6 +16,7 @@
#include <linux/dma-buf.h>
#include <drm/tegra_drm.h>
+#include "drm.h"
#include "gem.h"
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
@@ -126,7 +127,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
goto err_mmap;
if (flags & DRM_TEGRA_GEM_CREATE_TILED)
- bo->flags |= TEGRA_BO_TILED;
+ bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
@@ -259,8 +260,10 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
+ min_pitch = round_up(min_pitch, tegra->pitch_align);
if (args->pitch < min_pitch)
args->pitch = min_pitch;
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 2f3fe96c5154..43a25c853357 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -16,8 +16,18 @@
#include <drm/drm.h>
#include <drm/drmP.h>
-#define TEGRA_BO_TILED (1 << 0)
-#define TEGRA_BO_BOTTOM_UP (1 << 1)
+#define TEGRA_BO_BOTTOM_UP (1 << 0)
+
+enum tegra_bo_tiling_mode {
+ TEGRA_BO_TILING_MODE_PITCH,
+ TEGRA_BO_TILING_MODE_TILED,
+ TEGRA_BO_TILING_MODE_BLOCK,
+};
+
+struct tegra_bo_tiling {
+ enum tegra_bo_tiling_mode mode;
+ unsigned long value;
+};
struct tegra_bo {
struct drm_gem_object gem;
@@ -26,6 +36,8 @@ struct tegra_bo {
struct sg_table *sgt;
dma_addr_t paddr;
void *vaddr;
+
+ struct tegra_bo_tiling tiling;
};
static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
index 7c53941f2a9e..02cd3e37a6ec 100644
--- a/drivers/gpu/drm/tegra/gr2d.c
+++ b/drivers/gpu/drm/tegra/gr2d.c
@@ -121,6 +121,7 @@ static const struct of_device_id gr2d_match[] = {
{ .compatible = "nvidia,tegra20-gr2d" },
{ },
};
+MODULE_DEVICE_TABLE(of, gr2d_match);
static const u32 gr2d_addr_regs[] = {
GR2D_UA_BASE_ADDR,
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
index 30f5ba9bd6d0..2bea2b2d204e 100644
--- a/drivers/gpu/drm/tegra/gr3d.c
+++ b/drivers/gpu/drm/tegra/gr3d.c
@@ -130,6 +130,7 @@ static const struct of_device_id tegra_gr3d_match[] = {
{ .compatible = "nvidia,tegra20-gr3d" },
{ }
};
+MODULE_DEVICE_TABLE(of, tegra_gr3d_match);
static const u32 gr3d_addr_regs[] = {
GR3D_IDX_ATTRIBUTE( 0),
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index ba067bb767e3..ffe26547328d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1450,6 +1450,7 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
{ .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
static int tegra_hdmi_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index a3e4f1eca6f7..0c67d7eebc94 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -105,7 +105,7 @@ static void drm_connector_clear(struct drm_connector *connector)
static void tegra_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
drm_connector_clear(connector);
}
@@ -140,7 +140,9 @@ static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
drm_panel_disable(panel);
tegra_output_disable(output);
+ drm_panel_unprepare(panel);
} else {
+ drm_panel_prepare(panel);
tegra_output_enable(output);
drm_panel_enable(panel);
}
@@ -318,7 +320,7 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
- drm_sysfs_connector_add(&output->connector);
+ drm_connector_register(&output->connector);
output->encoder.possible_crtcs = 0x3;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 27c979b50111..0410e467b828 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -516,7 +516,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
if (err < 0) {
dev_err(sor->dev, "failed to probe eDP link: %d\n",
err);
- return err;
+ goto unlock;
}
}
@@ -525,7 +525,7 @@ static int tegra_output_sor_enable(struct tegra_output *output)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
memset(&config, 0, sizeof(config));
- config.bits_per_pixel = 24; /* XXX: don't hardcode? */
+ config.bits_per_pixel = output->connector.display_info.bpc * 3;
err = tegra_sor_calc_config(sor, mode, &config, &link);
if (err < 0)
@@ -815,12 +815,22 @@ static int tegra_output_sor_enable(struct tegra_output *output)
* configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
* raster, associate with display controller)
*/
- value = SOR_STATE_ASY_VSYNCPOL |
- SOR_STATE_ASY_HSYNCPOL |
- SOR_STATE_ASY_PROTOCOL_DP_A |
+ value = SOR_STATE_ASY_PROTOCOL_DP_A |
SOR_STATE_ASY_CRC_MODE_COMPLETE |
SOR_STATE_ASY_OWNER(dc->pipe + 1);
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value &= ~SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= SOR_STATE_ASY_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value &= ~SOR_STATE_ASY_VSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= SOR_STATE_ASY_VSYNCPOL;
+
switch (config.bits_per_pixel) {
case 24:
value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
@@ -1455,6 +1465,7 @@ static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra124-sor", },
{ },
};
+MODULE_DEVICE_TABLE(of, tegra_sor_of_match);
struct platform_driver tegra_sor_driver = {
.driver = {
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index b20b69488dc9..6be623b4a86f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -120,8 +120,8 @@ static int cpufreq_transition(struct notifier_block *nb,
static int tilcdc_unload(struct drm_device *dev)
{
struct tilcdc_drm_private *priv = dev->dev_private;
- struct tilcdc_module *mod, *cur;
+ drm_fbdev_cma_fini(priv->fbdev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
@@ -148,11 +148,6 @@ static int tilcdc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
- list_for_each_entry_safe(mod, cur, &module_list, list) {
- DBG("destroying module: %s", mod->name);
- mod->funcs->destroy(mod);
- }
-
kfree(priv);
return 0;
@@ -628,13 +623,13 @@ static int __init tilcdc_drm_init(void)
static void __exit tilcdc_drm_fini(void)
{
DBG("fini");
- tilcdc_tfp410_fini();
- tilcdc_slave_fini();
- tilcdc_panel_fini();
platform_driver_unregister(&tilcdc_platform_driver);
+ tilcdc_panel_fini();
+ tilcdc_slave_fini();
+ tilcdc_tfp410_fini();
}
-late_initcall(tilcdc_drm_init);
+module_init(tilcdc_drm_init);
module_exit(tilcdc_drm_fini);
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 093803683b25..7596c144a9fb 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -98,7 +98,6 @@ struct tilcdc_module;
struct tilcdc_module_ops {
/* create appropriate encoders/connectors: */
int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
- void (*destroy)(struct tilcdc_module *mod);
#ifdef CONFIG_DEBUG_FS
/* create debugfs nodes (can be NULL): */
int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 86c67329b605..4c7aa1d8134f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -151,6 +151,7 @@ struct panel_connector {
static void panel_connector_destroy(struct drm_connector *connector)
{
struct panel_connector *panel_connector = to_panel_connector(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(panel_connector);
}
@@ -247,7 +248,7 @@ static struct drm_connector *panel_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
@@ -281,23 +282,8 @@ static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
return 0;
}
-static void panel_destroy(struct tilcdc_module *mod)
-{
- struct panel_module *panel_mod = to_panel_module(mod);
-
- if (panel_mod->timings) {
- display_timings_release(panel_mod->timings);
- kfree(panel_mod->timings);
- }
-
- tilcdc_module_cleanup(mod);
- kfree(panel_mod->info);
- kfree(panel_mod);
-}
-
static const struct tilcdc_module_ops panel_module_ops = {
.modeset_init = panel_modeset_init,
- .destroy = panel_destroy,
};
/*
@@ -373,6 +359,7 @@ static int panel_probe(struct platform_device *pdev)
return -ENOMEM;
mod = &panel_mod->base;
+ pdev->dev.platform_data = mod;
tilcdc_module_init(mod, "panel", &panel_module_ops);
@@ -380,17 +367,16 @@ static int panel_probe(struct platform_device *pdev)
if (IS_ERR(pinctrl))
dev_warn(&pdev->dev, "pins are not configured\n");
-
panel_mod->timings = of_get_display_timings(node);
if (!panel_mod->timings) {
dev_err(&pdev->dev, "could not get panel timings\n");
- goto fail;
+ goto fail_free;
}
panel_mod->info = of_get_panel_info(node);
if (!panel_mod->info) {
dev_err(&pdev->dev, "could not get panel info\n");
- goto fail;
+ goto fail_timings;
}
mod->preferred_bpp = panel_mod->info->bpp;
@@ -401,13 +387,26 @@ static int panel_probe(struct platform_device *pdev)
return 0;
-fail:
- panel_destroy(mod);
+fail_timings:
+ display_timings_release(panel_mod->timings);
+
+fail_free:
+ tilcdc_module_cleanup(mod);
+ kfree(panel_mod);
return ret;
}
static int panel_remove(struct platform_device *pdev)
{
+ struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+ struct panel_module *panel_mod = to_panel_module(mod);
+
+ display_timings_release(panel_mod->timings);
+
+ tilcdc_module_cleanup(mod);
+ kfree(panel_mod->info);
+ kfree(panel_mod);
+
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index 595068ba2d5e..3775fd49dac4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -166,6 +166,7 @@ struct slave_connector {
static void slave_connector_destroy(struct drm_connector *connector)
{
struct slave_connector *slave_connector = to_slave_connector(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(slave_connector);
}
@@ -261,7 +262,7 @@ static struct drm_connector *slave_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
@@ -295,17 +296,8 @@ static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
return 0;
}
-static void slave_destroy(struct tilcdc_module *mod)
-{
- struct slave_module *slave_mod = to_slave_module(mod);
-
- tilcdc_module_cleanup(mod);
- kfree(slave_mod);
-}
-
static const struct tilcdc_module_ops slave_module_ops = {
.modeset_init = slave_modeset_init,
- .destroy = slave_destroy,
};
/*
@@ -355,10 +347,13 @@ static int slave_probe(struct platform_device *pdev)
}
slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
- if (!slave_mod)
- return -ENOMEM;
+ if (!slave_mod) {
+ ret = -ENOMEM;
+ goto fail_adapter;
+ }
mod = &slave_mod->base;
+ pdev->dev.platform_data = mod;
mod->preferred_bpp = slave_info.bpp;
@@ -373,10 +368,20 @@ static int slave_probe(struct platform_device *pdev)
tilcdc_slave_probedefer(false);
return 0;
+
+fail_adapter:
+ i2c_put_adapter(slavei2c);
+ return ret;
}
static int slave_remove(struct platform_device *pdev)
{
+ struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+ struct slave_module *slave_mod = to_slave_module(mod);
+
+ tilcdc_module_cleanup(mod);
+ kfree(slave_mod);
+
return 0;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index c38b56b268ac..354c47ca6374 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -167,6 +167,7 @@ struct tfp410_connector {
static void tfp410_connector_destroy(struct drm_connector *connector)
{
struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(tfp410_connector);
}
@@ -261,7 +262,7 @@ static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
if (ret)
goto fail;
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
return connector;
@@ -295,23 +296,8 @@ static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev
return 0;
}
-static void tfp410_destroy(struct tilcdc_module *mod)
-{
- struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
-
- if (tfp410_mod->i2c)
- i2c_put_adapter(tfp410_mod->i2c);
-
- if (!IS_ERR_VALUE(tfp410_mod->gpio))
- gpio_free(tfp410_mod->gpio);
-
- tilcdc_module_cleanup(mod);
- kfree(tfp410_mod);
-}
-
static const struct tilcdc_module_ops tfp410_module_ops = {
.modeset_init = tfp410_modeset_init,
- .destroy = tfp410_destroy,
};
/*
@@ -341,6 +327,7 @@ static int tfp410_probe(struct platform_device *pdev)
return -ENOMEM;
mod = &tfp410_mod->base;
+ pdev->dev.platform_data = mod;
tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
@@ -364,6 +351,7 @@ static int tfp410_probe(struct platform_device *pdev)
tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
if (!tfp410_mod->i2c) {
dev_err(&pdev->dev, "could not get i2c\n");
+ of_node_put(i2c_node);
goto fail;
}
@@ -377,19 +365,32 @@ static int tfp410_probe(struct platform_device *pdev)
ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
if (ret) {
dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
- goto fail;
+ goto fail_adapter;
}
}
return 0;
+fail_adapter:
+ i2c_put_adapter(tfp410_mod->i2c);
+
fail:
- tfp410_destroy(mod);
+ tilcdc_module_cleanup(mod);
+ kfree(tfp410_mod);
return ret;
}
static int tfp410_remove(struct platform_device *pdev)
{
+ struct tilcdc_module *mod = dev_get_platdata(&pdev->dev);
+ struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
+
+ i2c_put_adapter(tfp410_mod->i2c);
+ gpio_free(tfp410_mod->gpio);
+
+ tilcdc_module_cleanup(mod);
+ kfree(tfp410_mod);
+
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ab9f7171c4f..3da89d5dab60 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
spin_lock(&glob->lru_lock);
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = __ttm_bo_reserve(entry, false, true, false, 0);
+ ret = __ttm_bo_reserve(entry, false, true, false, NULL);
if (remove_all && ret) {
spin_unlock(&glob->lru_lock);
ret = __ttm_bo_reserve(entry, false, false,
- false, 0);
+ false, NULL);
spin_lock(&glob->lru_lock);
}
@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
if (!ret)
break;
}
@@ -784,7 +784,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
int ret;
do {
- ret = (*man->func->get_node)(man, bo, placement, mem);
+ ret = (*man->func->get_node)(man, bo, placement, 0, mem);
if (unlikely(ret != 0))
return ret;
if (mem->mm_node)
@@ -897,7 +897,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (man->has_type && man->use_type) {
type_found = true;
- ret = (*man->func->get_node)(man, bo, placement, mem);
+ ret = (*man->func->get_node)(man, bo, placement,
+ cur_flags, mem);
if (unlikely(ret))
return ret;
}
@@ -937,7 +938,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
ttm_flag_masked(&cur_flags, placement->busy_placement[i],
~TTM_PL_MASK_MEMTYPE);
-
if (mem_type == TTM_PL_SYSTEM) {
mem->mem_type = mem_type;
mem->placement = cur_flags;
@@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+ ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
if (unlikely(ret != 0))
return ret;
spin_lock(&bdev->fence_lock);
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = __ttm_bo_reserve(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, NULL);
if (!ret)
break;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index bd850c9f4bca..9e103a4875c8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -50,6 +50,7 @@ struct ttm_range_manager {
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -67,7 +68,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
if (!node)
return -ENOMEM;
- if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+ if (flags & TTM_PL_FLAG_TOPDOWN)
aflags = DRM_MM_CREATE_TOP;
spin_lock(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1df856f78568..30e5d90cb7bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
pgprot_val(tmp) |= _PAGE_GUARDED;
}
#endif
-#if defined(__ia64__)
+#if defined(__ia64__) || defined(__arm__)
if (caching_flags & TTM_PL_FLAG_WC)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe9d904..66fc6395eb54 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -35,7 +35,7 @@
#include <drm/drm_sysfs.h>
static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+static atomic_t device_released;
static struct device_type ttm_drm_class_type = {
.name = "ttm",
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9f9234..09874d695188 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
*
* @pool: to free the pages from
* @nr_free: The number of pages to free, or FREE_ALL_PAGES to free all.
+ * @gfp: GFP flags to use when allocating the temporary page array.
**/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+ gfp_t gfp)
{
unsigned long irq_flags;
struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
if (NUM_PAGES_TO_ALLOC < nr_free)
npages_to_free = NUM_PAGES_TO_ALLOC;
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
if (!pages_to_free) {
pr_err("Failed to allocate memory for pool free operation\n");
return 0;
@@ -382,32 +383,35 @@ out:
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
*
* This code is crying out for a shrinker per pool....
*/
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static DEFINE_MUTEX(lock);
+ static unsigned start_pool;
unsigned i;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;
- pool_offset = pool_offset % NUM_POOLS;
+ if (!mutex_trylock(&lock))
+ return SHRINK_STOP;
+ pool_offset = ++start_pool % NUM_POOLS;
/* select start pool in round robin fashion */
for (i = 0; i < NUM_POOLS; ++i) {
unsigned nr_free = shrink_pages;
if (shrink_pages == 0)
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
- shrink_pages = ttm_page_pool_free(pool, nr_free);
+ shrink_pages = ttm_page_pool_free(pool, nr_free,
+ sc->gfp_mask);
freed += nr_free - shrink_pages;
}
+ mutex_unlock(&lock);
return freed;
}
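The general shape of the fix, shown standalone (a sketch with illustrative names, not TTM code): reclaim can re-enter the shrinker through the pool's own allocations, so never block on the lock and never allocate with anything stronger than sc->gfp_mask:

	static unsigned long
	pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	{
		static DEFINE_MUTEX(lock);
		unsigned long freed;

		if (!mutex_trylock(&lock))	/* contended: tell vmscan to back off */
			return SHRINK_STOP;

		/* hypothetical helper; must allocate only with sc->gfp_mask */
		freed = pool_free_pages(sc->nr_to_scan, sc->gfp_mask);

		mutex_unlock(&lock);
		return freed;
	}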
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
if (npages)
- ttm_page_pool_free(pool, npages);
+ ttm_page_pool_free(pool, npages, GFP_KERNEL);
}
/*
@@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
return 0;
}
-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
char *name)
{
spin_lock_init(&pool->lock);
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
ttm_pool_mm_shrink_fini(_manager);
for (i = 0; i < NUM_POOLS; ++i)
- ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+ ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+ GFP_KERNEL);
kobject_put(&_manager->kobj);
_manager = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index fb8259f69839..ca65df144765 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
*
* @pool: to free the pages from
* @nr_free: The number of pages to free, or FREE_ALL_PAGES to free all.
+ * @gfp: GFP flags to use when allocating the temporary page array.
**/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+ gfp_t gfp)
{
unsigned long irq_flags;
struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
npages_to_free, nr_free);
}
#endif
- pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
- GFP_KERNEL);
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
if (!pages_to_free) {
pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
if (pool->type != type)
continue;
/* Takes a spinlock.. */
- ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
/* This code path is called after _all_ references to the
* struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
- ttm_dma_page_pool_free(pool, npages);
+ ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
*
* XXX: (dchinner) Deadlock warning!
*
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
*
* I'm getting sadder as I hear more pathetic whimpers about needing per-pool
* shrinkers
@@ -1004,9 +1002,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- static atomic_t start_pool = ATOMIC_INIT(0);
+ static unsigned start_pool;
unsigned idx = 0;
- unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned pool_offset;
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
unsigned long freed = 0;
@@ -1014,8 +1012,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&_manager->pools))
return SHRINK_STOP;
- mutex_lock(&_manager->lock);
- pool_offset = pool_offset % _manager->npools;
+ if (!mutex_trylock(&_manager->lock))
+ return SHRINK_STOP;
+ if (!_manager->npools)
+ goto out;
+ pool_offset = ++start_pool % _manager->npools;
list_for_each_entry(p, &_manager->pools, pools) {
unsigned nr_free;
@@ -1027,13 +1028,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
if (++idx < pool_offset)
continue;
nr_free = shrink_pages;
- shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+ sc->gfp_mask);
freed += nr_free - shrink_pages;
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
+out:
mutex_unlock(&_manager->lock);
return freed;
}
@@ -1044,7 +1047,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
struct device_pools *p;
unsigned long count = 0;
- mutex_lock(&_manager->lock);
+ if (!mutex_trylock(&_manager->lock))
+ return 0;
list_for_each_entry(p, &_manager->pools, pools)
count += p->pool->npages_free;
mutex_unlock(&_manager->lock);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b44d548c56f8..e026a9e2942a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -105,14 +105,7 @@ static struct drm_encoder*
udl_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
- struct drm_mode_object *obj;
- struct drm_encoder *encoder;
-
- obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER);
- if (!obj)
- return NULL;
- encoder = obj_to_encoder(obj);
- return encoder;
+ return drm_encoder_find(connector->dev, enc_id);
}
static int udl_connector_set_property(struct drm_connector *connector,
@@ -124,7 +117,7 @@ static int udl_connector_set_property(struct drm_connector *connector,
static void udl_connector_destroy(struct drm_connector *connector)
{
- drm_sysfs_connector_remove(connector);
+ drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -154,7 +147,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_DVII);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
- drm_sysfs_connector_add(connector);
+ drm_connector_register(connector);
drm_mode_connector_attach_encoder(connector, encoder);
drm_object_attach_property(&connector->base,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 377176372da8..d1da339843ca 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -550,7 +550,7 @@ out:
return ret;
}
-static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
+static const struct drm_fb_helper_funcs udl_fb_helper_funcs = {
.fb_probe = udlfb_create,
};
@@ -583,7 +583,8 @@ int udl_fbdev_init(struct drm_device *dev)
return -ENOMEM;
udl->fbdev = ufbdev;
- ufbdev->helper.funcs = &udl_fb_helper_funcs;
+
+ drm_fb_helper_prepare(dev, &ufbdev->helper, &udl_fb_helper_funcs);
ret = drm_fb_helper_init(dev, &ufbdev->helper,
1, 1);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd73f399..8044f5fb7c49 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
}
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
{
struct page **pages;
if (obj->pages)
return 0;
- pages = drm_gem_get_pages(&obj->base, gfpmask);
+ pages = drm_gem_get_pages(&obj->base);
if (IS_ERR(pages))
return PTR_ERR(pages);
@@ -147,7 +147,7 @@ int udl_gem_vmap(struct udl_gem_object *obj)
return 0;
}
- ret = udl_gem_get_pages(obj, GFP_KERNEL);
+ ret = udl_gem_get_pages(obj);
if (ret)
return ret;
@@ -205,7 +205,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
}
gobj = to_udl_bo(obj);
- ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+ ret = udl_gem_get_pages(gobj);
if (ret)
goto out;
ret = drm_gem_create_mmap_offset(obj);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 7094b92d1ec7..42795674bc07 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -306,10 +306,23 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
DRM_DEBUG("\n");
ret = udl_modeset_init(dev);
+ if (ret)
+ goto err;
ret = udl_fbdev_init(dev);
+ if (ret)
+ goto err;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ goto err_fb;
+
return 0;
+err_fb:
+ udl_fbdev_cleanup(dev);
err:
+ if (udl->urbs.count)
+ udl_free_urb_list(dev);
kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
@@ -325,6 +338,8 @@ int udl_driver_unload(struct drm_device *dev)
{
struct udl_device *udl = dev->dev_private;
+ drm_vblank_cleanup(dev);
+
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index cddc4fcf35cf..dc145d320b25 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -363,6 +363,26 @@ static void udl_crtc_destroy(struct drm_crtc *crtc)
kfree(crtc);
}
+static int udl_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct udl_framebuffer *ufb = to_udl_fb(fb);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (event)
+ drm_send_vblank_event(dev, 0, event);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ crtc->primary->fb = fb;
+
+ return 0;
+}
+
static void udl_crtc_prepare(struct drm_crtc *crtc)
{
}
@@ -384,6 +404,7 @@ static struct drm_crtc_helper_funcs udl_helper_funcs = {
static const struct drm_crtc_funcs udl_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.destroy = udl_crtc_destroy,
+ .page_flip = udl_crtc_page_flip,
};
static int udl_crtc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 458cdf6d81e8..ce0ab951f507 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -6,6 +6,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
- vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o
+ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
+ vmwgfx_cmdbuf_res.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
new file mode 100644
index 000000000000..bfeb4b1f2acf
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -0,0 +1,341 @@
+/**************************************************************************
+ *
+ * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+#define VMW_CMDBUF_RES_MAN_HT_ORDER 12
+
+enum vmw_cmdbuf_res_state {
+ VMW_CMDBUF_RES_COMMITED,
+ VMW_CMDBUF_RES_ADD,
+ VMW_CMDBUF_RES_DEL
+};
+
+/**
+ * struct vmw_cmdbuf_res - Command buffer managed resource entry.
+ *
+ * @res: Refcounted pointer to a struct vmw_resource.
+ * @hash: Hash entry for the manager hash table.
+ * @head: List head used either by the staging list or the manager list
+ * of committed resources.
+ * @state: Staging state of this resource entry.
+ * @man: Pointer to a resource manager for this entry.
+ */
+struct vmw_cmdbuf_res {
+ struct vmw_resource *res;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_cmdbuf_res_state state;
+ struct vmw_cmdbuf_res_manager *man;
+};
+
+/**
+ * struct vmw_cmdbuf_res_manager - Command buffer resource manager.
+ *
+ * @resources: Hash table containing staged and committed command buffer
+ * resources.
+ * @list: List of committed command buffer resources.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @resources and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_cmdbuf_res_manager {
+ struct drm_open_hash resources;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
+
+/**
+ * vmw_cmdbuf_res_lookup - Look up a command buffer resource
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @res_type: The resource type that, combined with the user key,
+ * identifies the resource.
+ * @user_key: The user key.
+ *
+ * Returns a valid refcounted struct vmw_resource pointer on success,
+ * or an error pointer on failure.
+ */
+struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = user_key | (res_type << 24);
+
+ ret = drm_ht_find_item(&man->resources, key, &hash);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
+ return vmw_resource_reference
+ (drm_hash_entry(hash, struct vmw_cmdbuf_res, hash)->res);
+}
+
+/**
+ * vmw_cmdbuf_res_free - Free a command buffer resource.
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @entry: Pointer to a struct vmw_cmdbuf_res.
+ *
+ * Frees a struct vmw_cmdbuf_res entry and drops its reference to the
+ * struct vmw_resource.
+ */
+static void vmw_cmdbuf_res_free(struct vmw_cmdbuf_res_manager *man,
+ struct vmw_cmdbuf_res *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->resources, &entry->hash));
+ vmw_resource_unreference(&entry->res);
+ kfree(entry);
+}
+
+/**
+ * vmw_cmdbuf_res_commit - Commit a list of command buffer resource actions
+ *
+ * @list: Caller's list of command buffer resource actions.
+ *
+ * This function commits a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_cmdbuf_res_commit(struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ entry->state = VMW_CMDBUF_RES_COMMITED;
+ list_add_tail(&entry->head, &entry->man->list);
+ break;
+ case VMW_CMDBUF_RES_DEL:
+ vmw_resource_unreference(&entry->res);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
+ *
+ * @man: Pointer to the command buffer resource manager
+ * @list: Caller's list of command buffer resource action
+ *
+ * This function reverts a list of command buffer resource
+ * additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_cmdbuf_res_revert(struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ vmw_cmdbuf_res_free(entry->man, entry);
+ break;
+ case VMW_CMDBUF_RES_DEL:
+ (void) drm_ht_insert_item(&entry->man->resources,
+ &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &entry->man->list);
+ entry->state = VMW_CMDBUF_RES_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
+ }
+}
+
+/**
+ * vmw_cmdbuf_res_add - Stage a command buffer managed resource for addition.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @res: Valid (refcount != 0) pointer to a struct vmw_resource.
+ * @list: The staging list.
+ *
+ * This function allocates a struct vmw_cmdbuf_res entry and adds the
+ * resource to the hash table of the manager identified by @man. The
+ * entry is then put on the staging list identified by @list.
+ */
+int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct vmw_resource *res,
+ struct list_head *list)
+{
+ struct vmw_cmdbuf_res *cres;
+ int ret;
+
+ cres = kzalloc(sizeof(*cres), GFP_KERNEL);
+ if (unlikely(cres == NULL))
+ return -ENOMEM;
+
+ cres->hash.key = user_key | (res_type << 24);
+ ret = drm_ht_insert_item(&man->resources, &cres->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ cres->state = VMW_CMDBUF_RES_ADD;
+ cres->res = vmw_resource_reference(res);
+ cres->man = man;
+ list_add_tail(&cres->head, list);
+
+out_invalid_key:
+ return ret;
+}
+
+/**
+ * vmw_cmdbuf_res_remove - Stage a command buffer managed resource for removal.
+ *
+ * @man: Pointer to the command buffer resource manager.
+ * @res_type: The resource type.
+ * @user_key: The user-space id of the resource.
+ * @list: The staging list.
+ *
+ * This function looks up the struct vmw_cmdbuf_res entry from the manager
+ * hash table and, if it exists, removes it. Depending on its current staging
+ * state it then either removes the entry from the staging list or adds it
+ * to it with a staging state of removal.
+ */
+int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct list_head *list)
+{
+ struct vmw_cmdbuf_res *entry;
+ struct drm_hash_item *hash;
+ int ret;
+
+ ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24),
+ &hash);
+ if (unlikely(ret != 0))
+ return -EINVAL;
+
+ entry = drm_hash_entry(hash, struct vmw_cmdbuf_res, hash);
+
+ switch (entry->state) {
+ case VMW_CMDBUF_RES_ADD:
+ vmw_cmdbuf_res_free(man, entry);
+ break;
+ case VMW_CMDBUF_RES_COMMITED:
+ (void) drm_ht_remove_item(&man->resources, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_CMDBUF_RES_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_cmdbuf_res_man_create - Allocate a command buffer managed resource
+ * manager.
+ *
+ * @dev_priv: Pointer to a struct vmw_private
+ *
+ * Allocates and initializes a command buffer managed resource manager. Returns
+ * an error pointer on failure.
+ */
+struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_cmdbuf_res_manager *man;
+ int ret;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (man == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->resources, VMW_CMDBUF_RES_MAN_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_cmdbuf_res_man_destroy - Destroy a command buffer managed resource
+ * manager.
+ *
+ * @man: Pointer to the manager to destroy.
+ *
+ * This function destroys a command buffer managed resource manager and
+ * unreferences / frees all command buffer managed resources and entries
+ * associated with it.
+ */
+void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+{
+ struct vmw_cmdbuf_res *entry, *next;
+
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_cmdbuf_res_free(man, entry);
+
+ kfree(man);
+}
+
+/**
+ * vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
+ * resource manager
+ *
+ * Returns the approximate allocation size of a command buffer managed
+ * resource manager.
+ */
+size_t vmw_cmdbuf_res_man_size(void)
+{
+ static size_t res_man_size;
+
+ if (unlikely(res_man_size == 0))
+ res_man_size =
+ ttm_round_pot(sizeof(struct vmw_cmdbuf_res_manager)) +
+ ttm_round_pot(sizeof(struct hlist_head) <<
+ VMW_CMDBUF_RES_MAN_HT_ORDER);
+
+ return res_man_size;
+}
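
Taken as a whole, the new file implements a small transaction model: additions and removals are staged on a caller-owned list while a command stream is being built, and the whole batch is then either committed (stream submitted) or reverted (submission failed). A minimal kernel-style sketch of the intended calling sequence, using only the entry points added above (the caller function itself is hypothetical):

    /* Hypothetical caller: replace the resource behind @user_key
     * atomically with respect to command submission.
     */
    static int example_replace(struct vmw_cmdbuf_res_manager *man,
                               u32 user_key, struct vmw_resource *res)
    {
            struct list_head staged;        /* caller-owned staging list */
            int ret;

            INIT_LIST_HEAD(&staged);

            /* -EINVAL just means there was no old entry to remove. */
            ret = vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
                                        user_key, &staged);
            if (ret && ret != -EINVAL)
                    goto out_revert;

            ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
                                     user_key, res, &staged);
            if (ret)
                    goto out_revert;

            /* ... build and submit the command stream here ... */

            vmw_cmdbuf_res_commit(&staged);
            return 0;

    out_revert:
            vmw_cmdbuf_res_revert(&staged);
            return ret;
    }
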
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 8bb26dcd9eae..5ac92874404d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -33,6 +33,7 @@ struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
struct vmw_ctx_binding_state cbs;
+ struct vmw_cmdbuf_res_manager *man;
};
@@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
-
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
struct {
SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
+ vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
- (void) vmw_context_binding_state_kill
- (&container_of(res, struct vmw_user_context, res)->cbs);
+ (void) vmw_context_binding_state_kill(&uctx->cbs);
(void) vmw_gb_context_destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
ret = vmw_resource_init(dev_priv, res, true,
res_free, &vmw_gb_context_func);
res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+ if (unlikely(ret != 0))
+ goto out_err;
- if (unlikely(ret != 0)) {
- if (res_free)
- res_free(res);
- else
- kfree(res);
- return ret;
+ if (dev_priv->has_mob) {
+ uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
+ if (unlikely(IS_ERR(uctx->man))) {
+ ret = PTR_ERR(uctx->man);
+ uctx->man = NULL;
+ goto out_err;
+ }
}
memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+
+out_err:
+ if (res_free)
+ res_free(res);
+ else
+ kfree(res);
+ return ret;
}
static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
*/
if (unlikely(vmw_user_context_size == 0))
- vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+ vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
+ ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
ret = ttm_read_lock(&dev_priv->reservation_sem, true);
if (unlikely(ret != 0))
@@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+ return container_of(ctx, struct vmw_user_context, res)->man;
+}
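
Note the error-handling idiom in vmw_gb_context_init() above: vmw_cmdbuf_res_man_create() reports failure through the returned pointer itself (an ERR_PTR) rather than through NULL, so the caller decodes it with PTR_ERR() and clears uctx->man before unwinding. A self-contained userspace re-creation of that convention (the encode/decode helpers below are re-implemented for illustration; in the kernel they come from <linux/err.h>):

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustration of the kernel's ERR_PTR convention: small negative
     * errno values are encoded at the top of the address space, where
     * no valid allocation can live.
     */
    #define MAX_ERRNO       4095

    static void *ERR_PTR(long error)      { return (void *)error; }
    static long  PTR_ERR(const void *ptr) { return (long)ptr; }
    static int   IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct manager { int dummy; };

    static struct manager *manager_create(int fail)
    {
            if (fail)
                    return ERR_PTR(-12);    /* -ENOMEM */
            return calloc(1, sizeof(struct manager));
    }

    int main(void)
    {
            struct manager *man = manager_create(1);

            if (IS_ERR(man)) {
                    printf("create failed: %ld\n", PTR_ERR(man));
                    man = NULL;     /* mirror what vmw_gb_context_init does */
            }
            free(man);              /* free(NULL) is a no-op */
            return 0;
    }
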
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 70ddce8358b0..ed1d51006ab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -61,7 +61,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -212,7 +212,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
if (pin)
vmw_execbuf_release_pinned_bo(dev_priv);
- ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+ ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
if (unlikely(ret != 0))
goto err_unlock;
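
The 0 -> NULL conversions in this file (and the identical ones later in the series) are type fixes, not behavior changes: the last parameter of ttm_bo_reserve() is a pointer, so a literal 0 compiled but was misleading. The signature assumed here, from the TTM API of this kernel generation; callers not taking part in a multi-buffer reservation pass a NULL ticket:

    int ttm_bo_reserve(struct ttm_buffer_object *bo,
                       bool interruptible,
                       bool no_wait, bool use_ticket,
                       struct ww_acquire_ctx *ticket);
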
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 246a62bab378..18b54acacfbb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -316,7 +316,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, false, 0);
+ ret = ttm_bo_reserve(bo, false, true, false, NULL);
BUG_ON(ret != 0);
ret = ttm_bo_kmap(bo, 0, 1, &map);
@@ -946,7 +946,6 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
- vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -966,16 +965,10 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
- vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
- if (IS_ERR(vmw_fp->shman))
- goto out_no_shman;
-
file_priv->driver_priv = vmw_fp;
return 0;
-out_no_shman:
- ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index c886c024c637..99f731757c4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,10 +40,10 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20140325"
+#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -75,14 +75,11 @@
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
-struct vmw_compat_shader_manager;
-
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
bool gb_aware;
- struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -124,6 +121,10 @@ struct vmw_resource {
void (*hw_destroy) (struct vmw_resource *res);
};
+
+/*
+ * Resources that are managed using ioctls.
+ */
enum vmw_res_type {
vmw_res_context,
vmw_res_surface,
@@ -132,6 +133,15 @@ enum vmw_res_type {
vmw_res_max
};
+/*
+ * Resources that are managed using command streams.
+ */
+enum vmw_cmdbuf_res_type {
+ vmw_cmdbuf_res_compat_shader
+};
+
+struct vmw_cmdbuf_res_manager;
+
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
@@ -341,7 +351,7 @@ struct vmw_sw_context{
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
- struct list_head staged_shaders;
+ struct list_head staged_cmd_res;
};
struct vmw_legacy_display;
@@ -974,7 +984,8 @@ extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
-
+extern struct vmw_cmdbuf_res_manager *
+vmw_context_res_man(struct vmw_resource *ctx);
/*
* Surface management - vmwgfx_surface.c
*/
@@ -1008,27 +1019,42 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
- SVGA3dShaderType shader_type,
- u32 *user_key);
-extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
- struct list_head *list);
-extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
- struct list_head *list);
-extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
- u32 user_key,
- SVGA3dShaderType shader_type,
- struct list_head *list);
-extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
- struct ttm_object_file *tfile,
struct list_head *list);
-extern struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv);
-extern void
-vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type);
+
+/*
+ * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+ */
+
+extern struct vmw_cmdbuf_res_manager *
+vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
+extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
+extern size_t vmw_cmdbuf_res_man_size(void);
+extern struct vmw_resource *
+vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key);
+extern void vmw_cmdbuf_res_revert(struct list_head *list);
+extern void vmw_cmdbuf_res_commit(struct list_head *list);
+extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct vmw_resource *res,
+ struct list_head *list);
+extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+ enum vmw_cmdbuf_res_type res_type,
+ u32 user_key,
+ struct list_head *list);
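+
+/*
+ * Sketch (not in-tree code): a parser resolves a user key through the
+ * per-context namespace declared above. Per the kerneldoc later in this
+ * patch, the lookup returns a refcounted pointer or an ERR_PTR(), and
+ * the manager is protected by the cmdbuf mutex.
+ *
+ * static struct vmw_resource *
+ * example_lookup(struct vmw_resource *ctx, u32 user_key)
+ * {
+ *	struct vmw_cmdbuf_res_manager *man = vmw_context_res_man(ctx);
+ *
+ *	return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
+ *				     user_key);
+ * }
+ */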
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 87df0b3674fd..7bfdaa163a33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -422,28 +422,91 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
return 0;
}
+
+/**
+ * vmw_cmd_res_reloc_add - Add a resource to a software context's
+ * relocation and validation lists.
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @id_loc: Pointer to where the id that needs translation is located.
+ * @res: Valid pointer to a struct vmw_resource.
+ * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
+ * used for this resource is returned here.
+ */
+static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ uint32_t *id_loc,
+ struct vmw_resource *res,
+ struct vmw_resource_val_node **p_val)
+{
+ int ret;
+ struct vmw_resource_val_node *node;
+
+ *p_val = NULL;
+ ret = vmw_resource_relocation_add(&sw_context->res_relocations,
+ res,
+ id_loc - sw_context->buf_start);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ ret = vmw_resource_val_add(sw_context, res, &node);
+ if (unlikely(ret != 0))
+ goto out_err;
+
+ if (res_type == vmw_res_context && dev_priv->has_mob &&
+ node->first_usage) {
+ /*
+ * Put contexts first on the list to be able to exit
+ * list traversal for contexts early.
+ */
+ list_del(&node->head);
+ list_add(&node->head, &sw_context->resource_list);
+
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_err;
+ node->staged_bindings =
+ kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
+ if (node->staged_bindings == NULL) {
+ DRM_ERROR("Failed to allocate context binding "
+ "information.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ INIT_LIST_HEAD(&node->staged_bindings->list);
+ }
+
+ if (p_val)
+ *p_val = node;
+
+out_err:
+ return ret;
+}
+
+
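
The list_del()/list_add() pair above is an ordering guarantee, not bookkeeping noise: context nodes are moved to the head of sw_context->resource_list so that traversals interested only in contexts can stop at the first non-context node; that is also why vmw_rebind_contexts() below trades continue for break. A sketch of the traversal pattern this enables (mirrors the in-tree loop):

    static int example_rebind_contexts(struct vmw_sw_context *sw_context)
    {
            struct vmw_resource_val_node *val;
            int ret;

            list_for_each_entry(val, &sw_context->resource_list, head) {
                    if (!val->staged_bindings)
                            break;  /* contexts are kept first; stop early */
                    ret = vmw_context_rebind_all(val->res);
                    if (ret)
                            return ret;
            }
            return 0;
    }
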
/**
- * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type-specific information.
- * @id: user-space resource id handle.
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validation node. Populated
* on exit.
*/
static int
-vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t id,
- uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -451,7 +514,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (id == SVGA3D_INVALID_ID) {
+ if (*id_loc == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
@@ -466,7 +529,7 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
* resource
*/
- if (likely(rcache->valid && id == rcache->handle)) {
+ if (likely(rcache->valid && *id_loc == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -480,49 +543,28 @@ vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->fp->tfile,
- id,
+ *id_loc,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) id);
+ (unsigned) *id_loc);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = id;
-
- ret = vmw_resource_relocation_add(&sw_context->res_relocations,
- res,
- id_loc - sw_context->buf_start);
- if (unlikely(ret != 0))
- goto out_no_reloc;
+ rcache->handle = *id_loc;
- ret = vmw_resource_val_add(sw_context, res, &node);
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
+ res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
rcache->node = node;
if (p_val)
*p_val = node;
-
- if (dev_priv->has_mob && node->first_usage &&
- res_type == vmw_res_context) {
- ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
- if (unlikely(ret != 0))
- goto out_no_reloc;
- node->staged_bindings =
- kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
- if (node->staged_bindings == NULL) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
- goto out_no_reloc;
- }
- INIT_LIST_HEAD(&node->staged_bindings->list);
- }
-
vmw_resource_unreference(&res);
return 0;
@@ -534,31 +576,6 @@ out_no_reloc:
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
- *
- * @dev_priv: Pointer to a device private structure.
- * @sw_context: Pointer to the software context.
- * @res_type: Resource type.
- * @converter: User-space visisble type specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
- */
-static int
-vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id_loc,
- struct vmw_resource_val_node **p_val)
-{
- return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
- converter, *id_loc, id_loc, p_val);
-}
-
-/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
@@ -572,8 +589,8 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
- if (likely(!val->staged_bindings))
- continue;
+ if (unlikely(!val->staged_bindings))
+ break;
ret = vmw_context_rebind_all(val->res);
if (unlikely(ret != 0)) {
@@ -1626,13 +1643,14 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
} *cmd;
int ret;
size_t size;
+ struct vmw_resource_val_node *val;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- NULL);
+ &val);
if (unlikely(ret != 0))
return ret;
@@ -1640,11 +1658,11 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return 0;
size = cmd->header.size - sizeof(cmd->body);
- ret = vmw_compat_shader_add(sw_context->fp->shman,
+ ret = vmw_compat_shader_add(dev_priv,
+ vmw_context_res_man(val->res),
cmd->body.shid, cmd + 1,
cmd->body.type, size,
- sw_context->fp->tfile,
- &sw_context->staged_shaders);
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1672,23 +1690,24 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
+ struct vmw_resource_val_node *val;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
- NULL);
+ &val);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
cmd->body.shid,
cmd->body.type,
- &sw_context->staged_shaders);
+ &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
@@ -1715,7 +1734,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
SVGA3dCmdHeader header;
SVGA3dCmdSetShader body;
} *cmd;
- struct vmw_resource_val_node *ctx_node;
+ struct vmw_resource_val_node *ctx_node, *res_node = NULL;
+ struct vmw_ctx_bindinfo bi;
+ struct vmw_resource *res = NULL;
int ret;
cmd = container_of(header, struct vmw_set_shader_cmd,
@@ -1727,32 +1748,40 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- if (dev_priv->has_mob) {
- struct vmw_ctx_bindinfo bi;
- struct vmw_resource_val_node *res_node;
- u32 shid = cmd->body.shid;
-
- if (shid != SVGA3D_INVALID_ID)
- (void) vmw_compat_shader_lookup(sw_context->fp->shman,
- cmd->body.type,
- &shid);
-
- ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
- vmw_res_shader,
- user_shader_converter,
- shid,
- &cmd->body.shid, &res_node);
+ if (!dev_priv->has_mob)
+ return 0;
+
+ if (cmd->body.shid != SVGA3D_INVALID_ID) {
+ res = vmw_compat_shader_lookup
+ (vmw_context_res_man(ctx_node->res),
+ cmd->body.shid,
+ cmd->body.type);
+
+ if (!IS_ERR(res)) {
+ ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
+ vmw_res_shader,
+ &cmd->body.shid, res,
+ &res_node);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ }
+
+ if (!res_node) {
+ ret = vmw_cmd_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ &cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
-
- bi.ctx = ctx_node->res;
- bi.res = res_node ? res_node->res : NULL;
- bi.bt = vmw_ctx_binding_shader;
- bi.i1.shader_type = cmd->body.type;
- return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
- return 0;
+ bi.ctx = ctx_node->res;
+ bi.res = res_node ? res_node->res : NULL;
+ bi.bt = vmw_ctx_binding_shader;
+ bi.i1.shader_type = cmd->body.type;
+ return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
/**
@@ -2394,6 +2423,8 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
}
+
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
@@ -2453,7 +2484,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
- INIT_LIST_HEAD(&sw_context->staged_shaders);
+ INIT_LIST_HEAD(&sw_context->staged_cmd_res);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
@@ -2548,8 +2579,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
- vmw_compat_shaders_commit(sw_context->fp->shman,
- &sw_context->staged_shaders);
+ vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -2576,8 +2606,7 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
- vmw_compat_shaders_revert(sw_context->fp->shman,
- &sw_context->staged_shaders);
+ vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b1273e8e9a69..26f8bdde3529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -47,6 +47,7 @@ struct vmwgfx_gmrid_man {
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
struct ttm_buffer_object *bo,
struct ttm_placement *placement,
+ uint32_t flags,
struct ttm_mem_reg *mem)
{
struct vmwgfx_gmrid_man *gman =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8f3edc4710f2..d2bc2b03d4c6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -75,7 +75,7 @@ void vmw_display_unit_cleanup(struct vmw_display_unit *du)
vmw_surface_unreference(&du->cursor_surface);
if (du->cursor_dmabuf)
vmw_dmabuf_unreference(&du->cursor_dmabuf);
- drm_sysfs_connector_remove(&du->connector);
+ drm_connector_unregister(&du->connector);
drm_crtc_cleanup(&du->crtc);
drm_encoder_cleanup(&du->encoder);
drm_connector_cleanup(&du->connector);
@@ -136,7 +136,7 @@ int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+ ret = ttm_bo_reserve(&dmabuf->base, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
@@ -343,7 +343,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
kmap_num = (64*64*4) >> PAGE_SHIFT;
- ret = ttm_bo_reserve(bo, true, false, false, 0);
+ ret = ttm_bo_reserve(bo, true, false, false, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return;
@@ -1501,7 +1501,6 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
{
struct drm_vmw_cursor_bypass_arg *arg = data;
struct vmw_display_unit *du;
- struct drm_mode_object *obj;
struct drm_crtc *crtc;
int ret = 0;
@@ -1519,13 +1518,12 @@ int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
return 0;
}
- obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
- if (!obj) {
+ crtc = drm_crtc_find(dev, arg->crtc_id);
+ if (!crtc) {
ret = -ENOENT;
goto out;
}
- crtc = obj_to_crtc(obj);
du = vmw_crtc_to_du(crtc);
du->hotspot_x = arg->xhot;
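
drm_crtc_find() is a thin convenience wrapper; in this kernel generation it is approximately the following inline from drm_crtc.h (shown here as an assumption for context), which is why the open-coded drm_mode_object_find() plus obj_to_crtc() cast can be dropped:

    static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
                                                 uint32_t id)
    {
            struct drm_mode_object *mo;

            mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
            return mo ? obj_to_crtc(mo) : NULL;
    }
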
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b2b9bd23aeee..15e185ae4c99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -371,7 +371,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
- (void) drm_sysfs_connector_add(connector);
+ (void) drm_connector_register(connector);
drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 01d68f0a69dc..a432c0db257c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -127,7 +127,7 @@ static void vmw_resource_release(struct kref *kref)
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
- ttm_bo_reserve(bo, false, false, false, 0);
+ ttm_bo_reserve(bo, false, false, false, NULL);
if (!list_empty(&res->mob_head) &&
res->func->unbind != NULL) {
struct ttm_validate_buffer val_buf;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index a95d3a0cabe4..b295463a60b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -467,7 +467,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
encoder->possible_crtcs = (1 << unit);
encoder->possible_clones = 0;
- (void) drm_sysfs_connector_add(connector);
+ (void) drm_connector_register(connector);
drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index c1559eeaffe9..8719fb3cccc9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,8 +29,6 @@
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
-#define VMW_COMPAT_SHADER_HT_ORDER 12
-
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
@@ -42,49 +40,8 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
-/**
- * enum vmw_compat_shader_state - Staging state for compat shaders
- */
-enum vmw_compat_shader_state {
- VMW_COMPAT_COMMITED,
- VMW_COMPAT_ADD,
- VMW_COMPAT_DEL
-};
-
-/**
- * struct vmw_compat_shader - Metadata for compat shaders.
- *
- * @handle: The TTM handle of the guest backed shader.
- * @tfile: The struct ttm_object_file the guest backed shader is registered
- * with.
- * @hash: Hash item for lookup.
- * @head: List head for staging lists or the compat shader manager list.
- * @state: Staging state.
- *
- * The structure is protected by the cmdbuf lock.
- */
-struct vmw_compat_shader {
- u32 handle;
- struct ttm_object_file *tfile;
- struct drm_hash_item hash;
- struct list_head head;
- enum vmw_compat_shader_state state;
-};
-
-/**
- * struct vmw_compat_shader_manager - Compat shader manager.
- *
- * @shaders: Hash table containing staged and commited compat shaders
- * @list: List of commited shaders.
- * @dev_priv: Pointer to a device private structure.
- *
- * @shaders and @list are protected by the cmdbuf mutex for now.
- */
-struct vmw_compat_shader_manager {
- struct drm_open_hash shaders;
- struct list_head list;
- struct vmw_private *dev_priv;
-};
+static uint64_t vmw_user_shader_size;
+static uint64_t vmw_shader_size;
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -98,8 +55,6 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);
-static uint64_t vmw_user_shader_size;
-
static const struct vmw_user_resource_conv user_shader_conv = {
.object_type = VMW_RES_SHADER,
.base_obj_to_res = vmw_user_shader_base_to_res,
@@ -347,6 +302,16 @@ static void vmw_user_shader_free(struct vmw_resource *res)
vmw_user_shader_size);
}
+static void vmw_shader_free(struct vmw_resource *res)
+{
+ struct vmw_shader *shader = vmw_res_to_shader(res);
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ kfree(shader);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_shader_size);
+}
+
/**
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
@@ -371,13 +336,13 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-static int vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *buffer,
- size_t shader_size,
- size_t offset,
- SVGA3dShaderType shader_type,
- struct ttm_object_file *tfile,
- u32 *handle)
+static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
@@ -442,6 +407,56 @@ out:
}
+struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type)
+{
+ struct vmw_shader *shader;
+ struct vmw_resource *res;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_shader_size == 0))
+ vmw_shader_size =
+ ttm_round_pot(sizeof(struct vmw_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out_err;
+ }
+
+ shader = kzalloc(sizeof(*shader), GFP_KERNEL);
+ if (unlikely(shader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_shader_size);
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ res = &shader->res;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_shader_free);
+
+out_err:
+ return ret ? ERR_PTR(ret) : res;
+}
+
+
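
vmw_shader_size above is computed once and cached: the struct size rounded up to a power of two plus a flat 128-byte estimate for idr bookkeeping. A self-contained sketch of that accounting (round_pot() below stands in for ttm_round_pot() on sub-page sizes, and the 72-byte struct size is a made-up figure):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for ttm_round_pot() on sub-page sizes: round up to a
     * power of two (the in-tree helper page-aligns larger sizes).
     */
    static size_t round_pot(size_t size)
    {
            size_t pot = 4;

            if ((size & (size - 1)) == 0)   /* already a power of two */
                    return size;
            while (pot < size)
                    pot <<= 1;
            return pot;
    }

    int main(void)
    {
            size_t shader_struct = 72;  /* hypothetical sizeof(struct vmw_shader) */
            size_t accounted = round_pot(shader_struct) + 128;

            assert(accounted == 256);   /* 72 -> 128, plus the idr estimate */
            printf("accounted per shader: %zu bytes\n", accounted);
            return 0;
    }
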
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -490,8 +505,8 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_bad_arg;
- ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
- shader_type, tfile, &arg->shader_handle);
+ ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
@@ -500,202 +515,83 @@ out_bad_arg:
}
/**
- * vmw_compat_shader_lookup - Look up a compat shader
- *
- * @man: Pointer to the compat shader manager.
- * @shader_type: The shader type, that combined with the user_key identifies
- * the shader.
- * @user_key: On entry, this should be a pointer to the user_key.
- * On successful exit, it will contain the guest-backed shader's TTM handle.
+ * vmw_compat_shader_id_ok - Check whether a compat shader user key and
+ * shader type are within valid bounds.
*
- * Returns 0 on success. Non-zero on failure, in which case the value pointed
- * to by @user_key is unmodified.
- */
-int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
- SVGA3dShaderType shader_type,
- u32 *user_key)
-{
- struct drm_hash_item *hash;
- int ret;
- unsigned long key = *user_key | (shader_type << 24);
-
- ret = drm_ht_find_item(&man->shaders, key, &hash);
- if (unlikely(ret != 0))
- return ret;
-
- *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
- hash)->handle;
-
- return 0;
-}
-
-/**
- * vmw_compat_shader_free - Free a compat shader.
- *
- * @man: Pointer to the compat shader manager.
- * @entry: Pointer to a struct vmw_compat_shader.
- *
- * Frees a struct vmw_compat_shder entry and drops its reference to the
- * guest backed shader.
- */
-static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
- struct vmw_compat_shader *entry)
-{
- list_del(&entry->head);
- WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
- WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
- TTM_REF_USAGE));
- kfree(entry);
-}
-
-/**
- * vmw_compat_shaders_commit - Commit a list of compat shader actions.
- *
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
*
- * This function commits a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions has commited the fifo contents to the device.
+ * Returns true if valid, false if not.
*/
-void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
- struct list_head *list)
+static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
-
- list_for_each_entry_safe(entry, next, list, head) {
- list_del(&entry->head);
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- entry->state = VMW_COMPAT_COMMITED;
- list_add_tail(&entry->head, &man->list);
- break;
- case VMW_COMPAT_DEL:
- ttm_ref_object_base_unref(entry->tfile, entry->handle,
- TTM_REF_USAGE);
- kfree(entry);
- break;
- default:
- BUG();
- break;
- }
- }
+ return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}
/**
- * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
*
- * @man: Pointer to the compat shader manager.
- * @list: Caller's list of compat shader actions.
+ * @user_key: User space id of the shader.
+ * @shader_type: Shader type.
*
- * This function reverts a list of compat shader additions or removals.
- * It is typically called when the execbuf ioctl call triggering these
- * actions failed for some reason, and the command stream was never
- * submitted.
+ * Returns a hash key suitable for a command buffer managed resource
+ * manager hash table.
*/
-void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
- struct list_head *list)
+static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
- int ret;
-
- list_for_each_entry_safe(entry, next, list, head) {
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- vmw_compat_shader_free(man, entry);
- break;
- case VMW_COMPAT_DEL:
- ret = drm_ht_insert_item(&man->shaders, &entry->hash);
- list_del(&entry->head);
- list_add_tail(&entry->head, &man->list);
- entry->state = VMW_COMPAT_COMMITED;
- break;
- default:
- BUG();
- break;
- }
- }
+ return user_key | (shader_type << 20);
}
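
The two helpers above fix the bit layout of a compat shader key: bits 0-19 carry the user key, bits 20-23 the shader type, and bit 24 upward stays free for the res_type tag that vmw_cmdbuf_res_add() ORs in. A standalone check of that packing, with the bounds taken from vmw_compat_shader_id_ok():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t user_key = (1u << 20) - 1;  /* max allowed by id_ok() */
            uint32_t shader_type = 15;           /* max allowed by id_ok() */
            uint32_t res_type = 0;  /* vmw_cmdbuf_res_compat_shader */

            /* vmw_compat_shader_key(): key in bits 0-19, type in 20-23. */
            uint32_t key = user_key | (shader_type << 20);

            /* vmw_cmdbuf_res_add() then tags the resource type at bit 24. */
            uint32_t hash_key = key | (res_type << 24);

            /* Even maximal shader keys never spill into the tag bits. */
            assert(key < (1u << 24));
            printf("key=0x%08x hash_key=0x%08x\n", key, hash_key);
            return 0;
    }
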
/**
* vmw_compat_shader_remove - Stage a compat shader for removal.
*
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @shader_type: Shader type.
- * @list: Caller's list of staged shader actions.
- *
- * This function stages a compat shader for removal and removes the key from
- * the shader manager's hash table. If the shader was previously only staged
- * for addition it is completely removed (But the execbuf code may keep a
- * reference if it was bound to a context between addition and removal). If
- * it was previously commited to the manager, it is staged for removal.
+ * @list: Caller's list of staged command buffer resource actions.
*/
-int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
- struct vmw_compat_shader *entry;
- struct drm_hash_item *hash;
- int ret;
-
- ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
- &hash);
- if (likely(ret != 0))
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
- entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
-
- switch (entry->state) {
- case VMW_COMPAT_ADD:
- vmw_compat_shader_free(man, entry);
- break;
- case VMW_COMPAT_COMMITED:
- (void) drm_ht_remove_item(&man->shaders, &entry->hash);
- list_del(&entry->head);
- entry->state = VMW_COMPAT_DEL;
- list_add_tail(&entry->head, list);
- break;
- default:
- BUG();
- break;
- }
-
- return 0;
+ return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key,
+ shader_type),
+ list);
}
/**
- * vmw_compat_shader_add - Create a compat shader and add the
- * key to the manager
+ * vmw_compat_shader_add - Create a compat shader and stage it for addition
+ * as a command buffer managed resource.
*
- * @man: Pointer to the compat shader manager
+ * @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
- * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
- * to be created with.
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: Size of the shader bytecode, in bytes.
- * @list: Caller's list of staged shader actions.
+ * @list: Caller's list of staged command buffer resource actions.
*
- * Note that only the key is added to the shader manager's hash table.
- * The shader is not yet added to the shader manager's list of shaders.
*/
-int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+int vmw_compat_shader_add(struct vmw_private *dev_priv,
+ struct vmw_cmdbuf_res_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
- struct ttm_object_file *tfile,
struct list_head *list)
{
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
- struct vmw_compat_shader *compat;
- u32 handle;
int ret;
+ struct vmw_resource *res;
- if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
return -EINVAL;
/* Allocate and pin a DMA buffer */
@@ -703,7 +599,7 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
if (unlikely(buf == NULL))
return -ENOMEM;
- ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto out;
@@ -728,84 +624,40 @@ int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
- /* Create a guest-backed shader container backed by the dma buffer */
- ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
- tfile, &handle);
- vmw_dmabuf_unreference(&buf);
+ res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
- if (unlikely(ret != 0))
- goto no_reserve;
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ goto no_reserve;
+ }
- /*
- * Create a compat shader structure and stage it for insertion
- * in the manager
- */
- compat = kzalloc(sizeof(*compat), GFP_KERNEL);
- if (compat == NULL)
- goto no_compat;
-
- compat->hash.key = user_key | (shader_type << 24);
- ret = drm_ht_insert_item(&man->shaders, &compat->hash);
- if (unlikely(ret != 0))
- goto out_invalid_key;
-
- compat->state = VMW_COMPAT_ADD;
- compat->handle = handle;
- compat->tfile = tfile;
- list_add_tail(&compat->head, list);
-
- return 0;
-out_invalid_key:
- kfree(compat);
-no_compat:
- ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+ ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key, shader_type),
+ res, list);
+ vmw_resource_unreference(&res);
no_reserve:
+ vmw_dmabuf_unreference(&buf);
out:
return ret;
}
/**
- * vmw_compat_shader_man_create - Create a compat shader manager
- *
- * @dev_priv: Pointer to a device private structure.
- *
- * Typically done at file open time. If successful returns a pointer to a
- * compat shader manager. Otherwise returns an error pointer.
- */
-struct vmw_compat_shader_manager *
-vmw_compat_shader_man_create(struct vmw_private *dev_priv)
-{
- struct vmw_compat_shader_manager *man;
- int ret;
-
- man = kzalloc(sizeof(*man), GFP_KERNEL);
- if (man == NULL)
- return ERR_PTR(-ENOMEM);
-
- man->dev_priv = dev_priv;
- INIT_LIST_HEAD(&man->list);
- ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
- if (ret == 0)
- return man;
-
- kfree(man);
- return ERR_PTR(ret);
-}
-
-/**
- * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ * vmw_compat_shader_lookup - Look up a compat shader
*
- * @man: Pointer to the shader manager to destroy.
+ * @man: Pointer to the command buffer managed resource manager identifying
+ * the shader namespace.
+ * @user_key: The user space id of the shader.
+ * @shader_type: The shader type.
*
- * Typically done at file close time.
+ * Returns a refcounted pointer to a struct vmw_resource if the shader was
+ * found. An error pointer otherwise.
*/
-void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+struct vmw_resource *
+vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type)
{
- struct vmw_compat_shader *entry, *next;
-
- mutex_lock(&man->dev_priv->cmdbuf_mutex);
- list_for_each_entry_safe(entry, next, &man->list, head)
- vmw_compat_shader_free(man, entry);
+ if (!vmw_compat_shader_id_ok(user_key, shader_type))
+ return ERR_PTR(-EINVAL);
- mutex_unlock(&man->dev_priv->cmdbuf_mutex);
- kfree(man);
+ return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
+ vmw_compat_shader_key(user_key,
+ shader_type));
}
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 112f27e51bc7..63bd63f3c7df 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -185,16 +185,16 @@ static unsigned int pin_job(struct host1x_job *job)
struct sg_table *sgt;
dma_addr_t phys_addr;
- reloc->target = host1x_bo_get(reloc->target);
- if (!reloc->target)
+ reloc->target.bo = host1x_bo_get(reloc->target.bo);
+ if (!reloc->target.bo)
goto unpin;
- phys_addr = host1x_bo_pin(reloc->target, &sgt);
+ phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
if (!phys_addr)
goto unpin;
job->addr_phys[job->num_unpins] = phys_addr;
- job->unpins[job->num_unpins].bo = reloc->target;
+ job->unpins[job->num_unpins].bo = reloc->target.bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
}
@@ -235,21 +235,21 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocarray[i];
u32 reloc_addr = (job->reloc_addr_phys[i] +
- reloc->target_offset) >> reloc->shift;
+ reloc->target.offset) >> reloc->shift;
u32 *target;
/* skip all other gathers */
- if (cmdbuf != reloc->cmdbuf)
+ if (cmdbuf != reloc->cmdbuf.bo)
continue;
- if (last_page != reloc->cmdbuf_offset >> PAGE_SHIFT) {
+ if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
if (cmdbuf_page_addr)
host1x_bo_kunmap(cmdbuf, last_page,
cmdbuf_page_addr);
cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
- reloc->cmdbuf_offset >> PAGE_SHIFT);
- last_page = reloc->cmdbuf_offset >> PAGE_SHIFT;
+ reloc->cmdbuf.offset >> PAGE_SHIFT);
+ last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;
if (unlikely(!cmdbuf_page_addr)) {
pr_err("Could not map cmdbuf for relocation\n");
@@ -257,7 +257,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
}
}
- target = cmdbuf_page_addr + (reloc->cmdbuf_offset & ~PAGE_MASK);
+ target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
*target = reloc_addr;
}
@@ -272,7 +272,7 @@ static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
{
offset *= sizeof(u32);
- if (reloc->cmdbuf != cmdbuf || reloc->cmdbuf_offset != offset)
+ if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
return false;
return true;
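
The renames above follow from regrouping struct host1x_reloc so that each side of a relocation carries its own bo/offset pair; the shape assumed by this hunk is approximately:

    struct host1x_reloc {
            struct {
                    struct host1x_bo *bo;
                    unsigned long offset;
            } cmdbuf;       /* where the word to patch lives */
            struct {
                    struct host1x_bo *bo;
                    unsigned long offset;
            } target;       /* what the patched word should point at */
            unsigned long shift;
    };
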
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index af0259708358..d2077f040f3e 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -237,12 +237,10 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
if (conflict->locks & lwants)
return conflict;
- /* Ok, now check if he owns the resource we want. We don't need
- * to check "decodes" since it should be impossible to own
- * own legacy resources you don't decode unless I have a bug
- * in this code...
+ /* Ok, now check if it owns the resource we want. We can
+ * lock resources that are not decoded, therefore a device
+ * can own resources it doesn't decode.
*/
- WARN_ON(conflict->owns & ~conflict->decodes);
match = lwants & conflict->owns;
if (!match)
continue;
@@ -254,13 +252,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
flags = 0;
pci_bits = 0;
+ /* If we can't control legacy resources via the bridge, we
+ * also need to disable normal decoding.
+ */
if (!conflict->bridge_has_one_vga) {
- vga_irq_set_state(conflict, false);
- flags |= PCI_VGA_STATE_CHANGE_DECODES;
- if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM)
pci_bits |= PCI_COMMAND_MEMORY;
- if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO)
pci_bits |= PCI_COMMAND_IO;
+
+ if (pci_bits) {
+ vga_irq_set_state(conflict, false);
+ flags |= PCI_VGA_STATE_CHANGE_DECODES;
+ }
}
if (change_bridge)
@@ -268,18 +272,19 @@ static struct vga_device *__vga_tryget(struct vga_device *vgadev,
pci_set_vga_state(conflict->pdev, false, pci_bits, flags);
conflict->owns &= ~match;
- /* If he also owned non-legacy, that is no longer the case */
- if (match & VGA_RSRC_LEGACY_MEM)
+
+ /* If we disabled normal decoding, reflect it in owns */
+ if (pci_bits & PCI_COMMAND_MEMORY)
conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
- if (match & VGA_RSRC_LEGACY_IO)
+ if (pci_bits & PCI_COMMAND_IO)
conflict->owns &= ~VGA_RSRC_NORMAL_IO;
}
enable_them:
/* ok dude, we got it, everybody conflicting has been disabled, let's
- * enable us. Make sure we don't mark a bit in "owns" that we don't
- * also have in "decodes". We can lock resources we don't decode but
- * not own them.
+ * enable us. Set the bits in "owns" regardless of whether we
+ * decode them. We can lock resources we don't decode, therefore
+ * we must track them via "owns".
*/
flags = 0;
pci_bits = 0;
@@ -291,7 +296,7 @@ enable_them:
if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
pci_bits |= PCI_COMMAND_IO;
}
- if (!!(wants & VGA_RSRC_LEGACY_MASK))
+ if (wants & VGA_RSRC_LEGACY_MASK)
flags |= PCI_VGA_STATE_CHANGE_BRIDGE;
pci_set_vga_state(vgadev->pdev, true, pci_bits, flags);
@@ -299,7 +304,7 @@ enable_them:
if (!vgadev->bridge_has_one_vga) {
vga_irq_set_state(vgadev, true);
}
- vgadev->owns |= (wants & vgadev->decodes);
+ vgadev->owns |= wants;
lock_them:
vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
if (rsrc & VGA_RSRC_LEGACY_IO)
@@ -649,7 +654,6 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev,
old_decodes = vgadev->decodes;
decodes_removed = ~new_decodes & old_decodes;
decodes_unlocked = vgadev->locks & decodes_removed;
- vgadev->owns &= ~decodes_removed;
vgadev->decodes = new_decodes;
pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",