Diffstat (limited to 'drivers/gpu/drm/xe')
-rw-r--r--  drivers/gpu/drm/xe/Kconfig | 47
-rw-r--r--  drivers/gpu/drm/xe/Kconfig.debug | 16
-rw-r--r--  drivers/gpu/drm/xe/Kconfig.profile | 1
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 46
-rw-r--r--  drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h | 41
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_abi.h | 30
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h | 3
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h | 99
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_capture_abi.h | 186
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h | 1
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 22
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_log_abi.h | 75
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h | 1
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h | 17
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h | 63
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h | 12
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h | 11
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h | 7
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h | 11
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h | 14
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 64
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h | 17
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h | 7
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h | 13
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h | 3
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h | 74
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h | 67
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h | 78
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h (renamed from drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h) | 0
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h | 4
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h | 23
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h | 6
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h | 6
-rw-r--r--  drivers/gpu/drm/xe/display/ext/i915_irq.c | 69
-rw-r--r--  drivers/gpu/drm/xe/display/intel_bo.c | 61
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fb_bo.c | 28
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fb_bo.h | 24
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 20
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c | 360
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.h | 25
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_rpm.c | 71
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_rps.c | 17
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display_wa.c | 6
-rw-r--r--  drivers/gpu/drm/xe/display/xe_dsb_buffer.c | 18
-rw-r--r--  drivers/gpu/drm/xe/display/xe_fb_pin.c | 132
-rw-r--r--  drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 135
-rw-r--r--  drivers/gpu/drm/xe/display/xe_plane_initial.c | 43
-rw-r--r--  drivers/gpu/drm/xe/display/xe_tdf.c | 6
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_alu_commands.h | 79
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h | 1
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_gpu_commands.h | 1
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_instr_defs.h | 1
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_mfx_commands.h | 28
-rw-r--r--  drivers/gpu/drm/xe/instructions/xe_mi_commands.h | 14
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h | 19
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h | 29
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gt_regs.h | 100
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_guc_regs.h | 2
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_irq_regs.h | 90
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_lrc_layout.h | 5
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_mchbar_regs.h | 14
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_oa_regs.h | 15
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_pcode_regs.h | 9
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_pmt.h | 19
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_pxp_regs.h | 23
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_reg_defs.h | 32
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_regs.h | 22
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_bo.c | 276
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf.c | 2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c | 334
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_live_test_mod.c | 4
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_migrate.c | 28
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_mocs.c | 30
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_pci.c | 26
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_rtp_test.c | 2
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_test_mod.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_assert.h | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_bb.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 1214
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.h | 116
-rw-r--r--  drivers/gpu/drm/xe/xe_bo_doc.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_bo_evict.c | 407
-rw-r--r--  drivers/gpu/drm/xe/xe_bo_evict.h | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_bo_types.h | 28
-rw-r--r--  drivers/gpu/drm/xe/xe_configfs.c | 250
-rw-r--r--  drivers/gpu/drm/xe/xe_configfs.h | 24
-rw-r--r--  drivers/gpu/drm/xe/xe_debugfs.c | 33
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump.c | 386
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump.h | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_devcoredump_types.h | 29
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 437
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h | 33
-rw-r--r--  drivers/gpu/drm/xe/xe_device_sysfs.c | 111
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 241
-rw-r--r--  drivers/gpu/drm/xe/xe_dma_buf.c | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c | 91
-rw-r--r--  drivers/gpu/drm/xe/xe_drv.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_eu_stall.c | 964
-rw-r--r--  drivers/gpu/drm/xe/xe_eu_stall.h | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 29
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 134
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.h | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 46
-rw-r--r--  drivers/gpu/drm/xe/xe_force_wake.c | 132
-rw-r--r--  drivers/gpu/drm/xe/xe_force_wake.h | 23
-rw-r--r--  drivers/gpu/drm/xe/xe_force_wake_types.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_gen_wa_oob.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_ggtt.c | 72
-rw-r--r--  drivers/gpu/drm/xe/xe_gpu_scheduler.c | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_gpu_scheduler.h | 20
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc.c | 74
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_proxy.c | 126
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_proxy.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_types.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 304
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.h | 30
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_ccs_mode.c | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_clock.c | 97
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_debugfs.c | 74
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_freq.c | 96
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_idle.c | 192
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_idle.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_idle_types.h | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.c | 83
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_mcr.h | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 41
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_printk.h | 33
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf.c | 117
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf.h | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 500
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 44
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c | 193
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c | 419
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h | 24
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h | 40
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c | 75
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c | 38
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_vf.c | 95
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_vf.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_stats.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_stats.h | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_stats_types.h | 18
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_throttle.c | 96
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c | 94
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_topology.c | 34
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_topology.h | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_types.h | 43
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.c | 492
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.h | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ads.c | 278
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ads_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_buf.c | 176
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_buf.h | 47
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_buf_types.h | 28
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_capture.c | 2011
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_capture.h | 61
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_capture_types.h | 70
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.c | 541
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct.h | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_ct_types.h | 29
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_debugfs.c | 139
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_engine_activity.c | 520
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_engine_activity.h | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_engine_activity_types.h | 102
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_fwif.h | 47
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_klv_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_log.c | 315
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_log.h | 15
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_log_types.h | 34
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.c | 298
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_relay.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 325
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_types.h | 24
-rw-r--r--  drivers/gpu/drm/xe/xe_heci_gsc.c | 46
-rw-r--r--  drivers/gpu/drm/xe/xe_heci_gsc.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_hmm.c | 202
-rw-r--r--  drivers/gpu/drm/xe/xe_hmm.h | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_huc.c | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c | 366
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 109
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_group.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_types.h | 68
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_fence_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.c | 608
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_irq.c | 452
-rw-r--r--  drivers/gpu/drm/xe/xe_irq.h | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_lmtt.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.c | 349
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.h | 37
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc_types.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_macros.h | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_memirq.c | 225
-rw-r--r--  drivers/gpu/drm/xe/xe_memirq.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_memirq_types.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 391
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.h | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.c | 211
-rw-r--r--  drivers/gpu/drm/xe/xe_mmio.h | 43
-rw-r--r--  drivers/gpu/drm/xe/xe_mocs.c | 32
-rw-r--r--  drivers/gpu/drm/xe/xe_module.c | 20
-rw-r--r--  drivers/gpu/drm/xe/xe_module.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_oa.c | 1144
-rw-r--r--  drivers/gpu/drm/xe/xe_oa.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_oa_types.h | 20
-rw-r--r--  drivers/gpu/drm/xe/xe_observation.c | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_pat.c | 88
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c | 412
-rw-r--r--  drivers/gpu/drm/xe/xe_pci_sriov.c | 72
-rw-r--r--  drivers/gpu/drm/xe/xe_pci_types.h | 21
-rw-r--r--  drivers/gpu/drm/xe/xe_pcode.c | 19
-rw-r--r--  drivers/gpu/drm/xe/xe_pcode.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_pcode_api.h | 29
-rw-r--r--  drivers/gpu/drm/xe/xe_platform_types.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c | 155
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pmu.c | 588
-rw-r--r--  drivers/gpu/drm/xe/xe_pmu.h | 18
-rw-r--r--  drivers/gpu/drm/xe/xe_pmu_types.h | 39
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c | 707
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.h | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_pt_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pt_walk.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_pt_walk.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp.c | 923
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp.h | 35
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp_debugfs.c | 129
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp_debugfs.h | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp_submit.c | 588
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp_submit.h | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_pxp_types.h | 135
-rw-r--r--  drivers/gpu/drm/xe/xe_query.c | 178
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_sr.c | 104
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_sr_types.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_reg_whitelist.c | 41
-rw-r--r--  drivers/gpu/drm/xe/xe_res_cursor.h | 123
-rw-r--r--  drivers/gpu/drm/xe/xe_ring_ops.c | 57
-rw-r--r--  drivers/gpu/drm/xe/xe_ring_ops_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_rtp.c | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_rtp.h | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_sa.c | 56
-rw-r--r--  drivers/gpu/drm/xe/xe_sa.h | 30
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job_types.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_shrinker.c | 258
-rw-r--r--  drivers/gpu/drm/xe/xe_shrinker.h | 18
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov.c | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_pf_helpers.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_types.h | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_vf.c | 263
-rw-r--r--  drivers/gpu/drm/xe/xe_sriov_vf.h | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_survivability_mode.c | 281
-rw-r--r--  drivers/gpu/drm/xe/xe_survivability_mode.h | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_survivability_mode_types.h | 35
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.c | 1043
-rw-r--r--  drivers/gpu/drm/xe/xe_svm.h | 185
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_tile.c | 26
-rw-r--r--  drivers/gpu/drm/xe/xe_tile.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_trace.h | 48
-rw-r--r--  drivers/gpu/drm/xe/xe_trace_bo.h | 36
-rw-r--r--  drivers/gpu/drm/xe/xe_trace_guc.h | 49
-rw-r--r--  drivers/gpu/drm/xe/xe_trace_lrc.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_trace_lrc.h | 52
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c | 77
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_sys_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_vram_mgr.c | 64
-rw-r--r--  drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_tuning.c | 94
-rw-r--r--  drivers/gpu/drm/xe/xe_tuning.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_uc.c | 24
-rw-r--r--  drivers/gpu/drm/xe/xe_uc.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_uc_fw.c | 113
-rw-r--r--  drivers/gpu/drm/xe/xe_uc_fw_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 774
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.h | 103
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_doc.h | 24
-rw-r--r--  drivers/gpu/drm/xe/xe_vm_types.h | 76
-rw-r--r--  drivers/gpu/drm/xe/xe_vram.c | 24
-rw-r--r--  drivers/gpu/drm/xe/xe_vsec.c | 233
-rw-r--r--  drivers/gpu/drm/xe/xe_vsec.h | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_wa.c | 117
-rw-r--r--  drivers/gpu/drm/xe/xe_wa_oob.rules | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_wait_user_fence.c | 7
-rw-r--r--  drivers/gpu/drm/xe/xe_wopcm.c | 15
300 files changed, 24668 insertions, 5541 deletions
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 7bbe46a98ff1..fcc2677a4229 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,19 +1,23 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
tristate "Intel Xe Graphics"
- depends on DRM && PCI && MMU && (m || (y && KUNIT=y))
+ depends on DRM && PCI && (m || (y && KUNIT=y))
+ depends on INTEL_VSEC || !INTEL_VSEC
+ depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
select INTERVAL_TREE
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
select DRM_BUDDY
+ select DRM_CLIENT_SELECTION
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL
select DRM_SUBALLOC_HELPER
select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
@@ -25,7 +29,6 @@ config DRM_XE
select BACKLIGHT_CLASS_DEVICE if ACPI
select INPUT if ACPI
select ACPI_VIDEO if X86 && ACPI
- select X86_PLATFORM_DEVICES if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
select IOSF_MBI
@@ -49,14 +52,50 @@ config DRM_XE
config DRM_XE_DISPLAY
bool "Enable display support"
- depends on DRM_XE && DRM_XE=m
- select FB_IOMEM_HELPERS
+ depends on DRM_XE && DRM_XE=m && HAS_IOPORT
+ select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION
select I2C
select I2C_ALGOBIT
default y
help
Disable this option only if you want to compile out display support.
+config DRM_XE_DP_TUNNEL
+ bool "Enable DP tunnel support"
+ depends on DRM_XE_DISPLAY
+ depends on USB4
+ select DRM_DISPLAY_DP_TUNNEL
+ default y
+ help
+ Choose this option to detect DP tunnels and enable the Bandwidth
+ Allocation mode for such tunnels. This allows using the maximum
+ resolution allowed by the link BW on all displays sharing the
+ link BW, for instance on a Thunderbolt link.
+
+ If in doubt say "Y".
+
+config DRM_XE_GPUSVM
+ bool "Enable CPU to GPU address mirroring"
+ depends on DRM_XE
+ depends on !UML
+ depends on DEVICE_PRIVATE
+ default y
+ select DRM_GPUSVM
+ help
+ Enable this option if you want support for CPU to GPU address
+ mirroring.
+
+ If in doubt say "Y".
+
+config DRM_XE_DEVMEM_MIRROR
+ bool "Enable device memory mirror"
+ depends on DRM_XE_GPUSVM
+ select GET_FREE_REGION
+ default y
+ help
+ Disable this option only if you want to compile out device memory
+ mirror support. Will reduce KMD memory footprint when disabled.
+
config DRM_XE_FORCE_PROBE
string "Force probe xe for selected Intel hardware IDs"
depends on DRM_XE
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index bc177368af6c..0d749ed44878 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -40,9 +40,21 @@ config DRM_XE_DEBUG_VM
If in doubt, say "N".
+config DRM_XE_DEBUG_MEMIRQ
+ bool "Enable extra memirq debugging"
+ default n
+ help
+ Choose this option to enable additional debugging info for
+ memory based interrupts.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_XE_DEBUG_SRIOV
bool "Enable extra SR-IOV debugging"
default n
+ select DRM_XE_DEBUG_MEMIRQ
help
Enable extra SR-IOV debugging info.
@@ -54,7 +66,7 @@ config DRM_XE_DEBUG_MEM
bool "Enable passing SYS/VRAM addresses to user space"
default n
help
- Pass object location trough uapi. Intended for extended
+ Pass object location through uapi. Intended for extended
testing and development only.
Recommended for driver developers only.
@@ -92,5 +104,5 @@ config DRM_XE_USERPTR_INVAL_INJECT
Choose this option when debugging error paths that
are hit during checks for userptr invalidations.
- Recomended for driver developers only.
+ Recommended for driver developers only.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/xe/Kconfig.profile b/drivers/gpu/drm/xe/Kconfig.profile
index ba17a25e8db3..7530df998148 100644
--- a/drivers/gpu/drm/xe/Kconfig.profile
+++ b/drivers/gpu/drm/xe/Kconfig.profile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE_JOB_TIMEOUT_MAX
int "Default max job timeout (ms)"
default 10000 # milliseconds
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index edfd812e0f41..e4bf484d4121 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -33,9 +33,10 @@ xe-y += xe_bb.o \
xe_device_sysfs.o \
xe_dma_buf.o \
xe_drm_client.o \
+ xe_eu_stall.o \
xe_exec.o \
- xe_execlist.o \
xe_exec_queue.o \
+ xe_execlist.o \
xe_force_wake.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
@@ -56,8 +57,11 @@ xe-y += xe_bb.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
+ xe_guc_buf.o \
+ xe_guc_capture.o \
xe_guc_ct.o \
xe_guc_db_mgr.o \
+ xe_guc_engine_activity.o \
xe_guc_hwconfig.o \
xe_guc_id_mgr.o \
xe_guc_klv_helpers.o \
@@ -65,11 +69,11 @@ xe-y += xe_bb.o \
xe_guc_pc.o \
xe_guc_submit.o \
xe_heci_gsc.o \
+ xe_huc.o \
xe_hw_engine.o \
xe_hw_engine_class_sysfs.o \
xe_hw_engine_group.o \
xe_hw_fence.o \
- xe_huc.o \
xe_irq.o \
xe_lrc.o \
xe_migrate.o \
@@ -85,23 +89,29 @@ xe-y += xe_bb.o \
xe_preempt_fence.o \
xe_pt.o \
xe_pt_walk.o \
+ xe_pxp.o \
+ xe_pxp_debugfs.o \
+ xe_pxp_submit.o \
xe_query.o \
xe_range_fence.o \
xe_reg_sr.o \
xe_reg_whitelist.o \
- xe_rtp.o \
xe_ring_ops.o \
+ xe_rtp.o \
xe_sa.o \
xe_sched_job.o \
+ xe_shrinker.o \
xe_step.o \
+ xe_survivability_mode.o \
xe_sync.o \
xe_tile.o \
xe_tile_sysfs.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
- xe_ttm_sys_mgr.o \
+ xe_trace_lrc.o \
xe_ttm_stolen_mgr.o \
+ xe_ttm_sys_mgr.o \
xe_ttm_vram_mgr.o \
xe_tuning.o \
xe_uc.o \
@@ -109,26 +119,33 @@ xe-y += xe_bb.o \
xe_vm.o \
xe_vram.o \
xe_vram_freq.o \
- xe_wait_user_fence.o \
+ xe_vsec.o \
xe_wa.o \
+ xe_wait_user_fence.o \
xe_wopcm.o
xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
+xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
+xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o
+xe-$(CONFIG_CONFIGFS_FS) += xe_configfs.o
+
# graphics virtualization (SR-IOV) support
xe-y += \
xe_gt_sriov_vf.o \
xe_guc_relay.o \
xe_memirq.o \
- xe_sriov.o
+ xe_sriov.o \
+ xe_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
xe_gt_sriov_pf_config.o \
xe_gt_sriov_pf_control.o \
+ xe_gt_sriov_pf_migration.o \
xe_gt_sriov_pf_monitor.o \
xe_gt_sriov_pf_policy.o \
xe_gt_sriov_pf_service.o \
@@ -148,7 +165,6 @@ subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \
-I$(src)/display/ext \
-I$(src)/compat-i915-headers \
-I$(srctree)/drivers/gpu/drm/i915/display/ \
- -Ddrm_i915_gem_object=xe_bo \
-Ddrm_i915_private=xe_device
# Rule to build SOC code shared with i915
@@ -165,11 +181,12 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/ext/i915_irq.o \
display/ext/i915_utils.o \
+ display/intel_bo.o \
display/intel_fb_bo.o \
display/intel_fbdev_fb.o \
display/xe_display.o \
display/xe_display_misc.o \
- display/xe_display_rps.o \
+ display/xe_display_rpm.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
@@ -180,7 +197,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
# SOC code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-soc/intel_dram.o \
- i915-soc/intel_pch.o
+ i915-soc/intel_rom.o
# Display code shared with i915
xe-$(CONFIG_DRM_XE_DISPLAY) += \
@@ -193,6 +210,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_bios.o \
i915-display/intel_bw.o \
i915-display/intel_cdclk.o \
+ i915-display/intel_cmtg.o \
i915-display/intel_color.o \
i915-display/intel_combo_phy.o \
i915-display/intel_connector.o \
@@ -203,6 +221,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
+ i915-display/intel_display_conversion.o \
i915-display/intel_display_device.o \
i915-display/intel_display_driver.o \
i915-display/intel_display_irq.o \
@@ -214,12 +233,14 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_display_wa.o \
i915-display/intel_dkl_phy.o \
i915-display/intel_dmc.o \
+ i915-display/intel_dmc_wl.o \
i915-display/intel_dp.o \
i915-display/intel_dp_aux.o \
i915-display/intel_dp_aux_backlight.o \
i915-display/intel_dp_hdcp.o \
i915-display/intel_dp_link_training.o \
i915-display/intel_dp_mst.o \
+ i915-display/intel_dp_test.o \
i915-display/intel_dpll.o \
i915-display/intel_dpll_mgr.o \
i915-display/intel_dpt_common.o \
@@ -248,18 +269,20 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
i915-display/intel_panel.o \
+ i915-display/intel_pfit.o \
i915-display/intel_pmdemand.o \
+ i915-display/intel_pch.o \
i915-display/intel_pps.o \
i915-display/intel_psr.o \
i915-display/intel_qp_tables.o \
i915-display/intel_quirks.o \
+ i915-display/intel_snps_hdmi_pll.o \
i915-display/intel_snps_phy.o \
i915-display/intel_tc.o \
i915-display/intel_vblank.o \
i915-display/intel_vdsc.o \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
- i915-display/intel_dmc_wl.o \
i915-display/intel_wm.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
@@ -292,6 +315,9 @@ ifeq ($(CONFIG_DEBUG_FS),y)
i915-display/intel_pipe_crc.o
endif
+xe-$(CONFIG_DRM_XE_DP_TUNNEL) += \
+ i915-display/intel_dp_tunnel.o
+
obj-$(CONFIG_DRM_XE) += xe.o
obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h b/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
index 57520809e48d..290e431cf10d 100644
--- a/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
+++ b/drivers/gpu/drm/xe/abi/gsc_pxp_commands_abi.h
@@ -6,6 +6,7 @@
#ifndef _ABI_GSC_PXP_COMMANDS_ABI_H
#define _ABI_GSC_PXP_COMMANDS_ABI_H
+#include <linux/sizes.h>
#include <linux/types.h>
/* Heci client ID for PXP commands */
@@ -14,6 +15,12 @@
#define PXP_APIVER(x, y) (((x) & 0xFFFF) << 16 | ((y) & 0xFFFF))
/*
+ * A PXP sub-section in an HECI packet can be up to 64K big in each direction.
+ * This does not include the top-level GSC header.
+ */
+#define PXP_MAX_PACKET_SIZE SZ_64K
+
+/*
* there are a lot of status codes for PXP, but we only define the cross-API
* common ones that we actually can handle in the kernel driver. Other failure
* codes should be printed to error msg for debug.
@@ -24,6 +31,7 @@ enum pxp_status {
PXP_STATUS_NOT_READY = 0x100e,
PXP_STATUS_PLATFCONFIG_KF1_NOVERIF = 0x101a,
PXP_STATUS_PLATFCONFIG_KF1_BAD = 0x101f,
+ PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED = 0x1037,
PXP_STATUS_OP_NOT_PERMITTED = 0x4013
};
@@ -42,6 +50,8 @@ struct pxp_cmd_header {
u32 buffer_len;
} __packed;
+#define PXP43_CMDID_INVALIDATE_STREAM_KEY 0x00000007
+#define PXP43_CMDID_INIT_SESSION 0x00000036
#define PXP43_CMDID_NEW_HUC_AUTH 0x0000003F /* MTL+ */
/* PXP-Input-Packet: HUC Auth-only */
@@ -56,4 +66,35 @@ struct pxp43_huc_auth_out {
struct pxp_cmd_header header;
} __packed;
+/* PXP-Input-Packet: Init PXP session */
+struct pxp43_create_arb_in {
+ struct pxp_cmd_header header;
+ /* header.stream_id fields for version 4.3 of Init PXP session: */
+ #define PXP43_INIT_SESSION_VALID BIT(0)
+ #define PXP43_INIT_SESSION_APPTYPE BIT(1)
+ #define PXP43_INIT_SESSION_APPID GENMASK(17, 2)
+ u32 protection_mode;
+ #define PXP43_INIT_SESSION_PROTECTION_ARB 0x2
+ u32 sub_session_id;
+ u32 init_flags;
+ u32 rsvd[12];
+} __packed;
+
+/* PXP-Output-Packet: Init PXP session */
+struct pxp43_create_arb_out {
+ struct pxp_cmd_header header;
+ u32 rsvd[8];
+} __packed;
+
+/* PXP-Input-Packet: Invalidate Stream Key */
+struct pxp43_inv_stream_key_in {
+ struct pxp_cmd_header header;
+ u32 rsvd[3];
+} __packed;
+
+/* PXP-Output-Packet: Invalidate Stream Key */
+struct pxp43_inv_stream_key_out {
+ struct pxp_cmd_header header;
+ u32 rsvd;
+} __packed;
#endif
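
To make the new ARB-session ABI above concrete, here is a minimal sketch of how a caller might fill out struct pxp43_create_arb_in. It is illustrative only and not part of this patch; the api_version/command_id members of struct pxp_cmd_header are assumed from the existing PXP HECI ABI, and FIELD_PREP comes from <linux/bitfield.h>:

    static void xe_example_fill_arb_msg(struct pxp43_create_arb_in *msg,
                                        u32 arb_session_id)
    {
            memset(msg, 0, sizeof(*msg));

            /* header fields assumed from the existing pxp_cmd_header layout */
            msg->header.api_version = PXP_APIVER(4, 3);
            msg->header.command_id = PXP43_CMDID_INIT_SESSION;
            msg->header.stream_id = FIELD_PREP(PXP43_INIT_SESSION_APPID, arb_session_id) |
                                    FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) |
                                    FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0);
            msg->header.buffer_len = sizeof(*msg) - sizeof(msg->header);

            msg->protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB;
    }
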
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 43ad4652c2b2..448afb86e05c 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -134,10 +134,14 @@ enum xe_guc_action {
XE_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
XE_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
XE_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+ XE_GUC_ACTION_REGISTER_G2G = 0x4507,
+ XE_GUC_ACTION_DEREGISTER_G2G = 0x4508,
XE_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
XE_GUC_ACTION_CLIENT_SOFT_RESET = 0x5507,
XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A,
+ XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C,
+ XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D,
XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000,
XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002,
XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003,
@@ -176,6 +180,14 @@ enum xe_guc_sleep_state_status {
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
#define GUC_LOG_CONTROL_DEFAULT_LOGGING (1 << 8)
+enum xe_guc_state_capture_event_status {
+ XE_GUC_STATE_CAPTURE_EVENT_STATUS_SUCCESS = 0x0,
+ XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE = 0x1,
+};
+
+#define XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK 0x000000FF
+#define XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN 1
+
#define XE_GUC_TLB_INVAL_TYPE_SHIFT 0
#define XE_GUC_TLB_INVAL_MODE_SHIFT 8
/* Flush PPC or SMRO caches along with TLB invalidation request */
@@ -210,4 +222,22 @@ enum xe_guc_tlb_inval_mode {
XE_GUC_TLB_INVAL_MODE_LITE = 0x1,
};
+/*
+ * GuC to GuC communication (de-)registration fields:
+ */
+enum xe_guc_g2g_type {
+ XE_G2G_TYPE_IN = 0x0,
+ XE_G2G_TYPE_OUT,
+ XE_G2G_TYPE_LIMIT,
+};
+
+#define XE_G2G_REGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_REGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_REGISTER_TYPE REG_GENMASK(11, 8)
+#define XE_G2G_REGISTER_SIZE REG_GENMASK(7, 0)
+
+#define XE_G2G_DEREGISTER_DEVICE REG_GENMASK(16, 16)
+#define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12)
+#define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8)
+
#endif
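
As an illustration of the new GuC-to-GuC registration fields, a sketch of how the descriptor dword of an XE_GUC_ACTION_REGISTER_G2G request could be packed; this is not part of the patch, the position of this dword within the H2G message is not defined here, and FIELD_PREP comes from <linux/bitfield.h>:

    static u32 xe_example_g2g_register_dw(bool outbound, u32 dev, u32 tile, u32 size)
    {
            u32 type = outbound ? XE_G2G_TYPE_OUT : XE_G2G_TYPE_IN;

            /* 'size' is the encoded buffer size; units per the GuC spec */
            return FIELD_PREP(XE_G2G_REGISTER_DEVICE, dev) |
                   FIELD_PREP(XE_G2G_REGISTER_TILE, tile) |
                   FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
                   FIELD_PREP(XE_G2G_REGISTER_SIZE, size);
    }
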
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index 85abe4f09ae2..b28c8fa061f7 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -174,6 +174,9 @@ struct slpc_task_state_data {
};
} __packed;
+#define SLPC_CTX_FREQ_REQ_IS_COMPUTE REG_BIT(28)
+#define SLPC_OPTIMIZED_STRATEGY_COMPUTE REG_BIT(0)
+
struct slpc_shared_data_header {
/* Total size in bytes of this shared buffer. */
u32 size;
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
index 181180f5945c..0b28659d94e9 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_sriov_abi.h
@@ -502,6 +502,44 @@
#define VF2GUC_VF_RESET_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
/**
+ * DOC: VF2GUC_NOTIFY_RESFIX_DONE
+ *
+ * This action is used by VF to notify the GuC that the VF KMD has completed
+ * post-migration recovery steps.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = MBZ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE` = 0x5508 |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE 0x5508u
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_LEN GUC_HXG_REQUEST_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_REQUEST_MSG_0_MBZ GUC_HXG_REQUEST_MSG_0_DATA0
+
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define VF2GUC_NOTIFY_RESFIX_DONE_RESPONSE_MSG_0_MBZ GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/**
* DOC: VF2GUC_QUERY_SINGLE_KLV
*
* This action is used by VF to query value of the single KLV data.
@@ -557,4 +595,65 @@
#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_2_VALUE64 GUC_HXG_REQUEST_MSG_n_DATAn
#define VF2GUC_QUERY_SINGLE_KLV_RESPONSE_MSG_3_VALUE96 GUC_HXG_REQUEST_MSG_n_DATAn
+/**
+ * DOC: PF2GUC_SAVE_RESTORE_VF
+ *
+ * This message is used by the PF to migrate VF info state maintained by the GuC.
+ *
+ * This message must be sent as `CTB HXG Message`_.
+ *
+ * Available since GuC version 70.25.0
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_HOST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:16 | DATA0 = **OPCODE** - operation to take: |
+ * | | | |
+ * | | | - _`GUC_PF_OPCODE_VF_SAVE` = 0 |
+ * | | | - _`GUC_PF_OPCODE_VF_RESTORE` = 1 |
+ * | +-------+--------------------------------------------------------------+
+ * | | 15:0 | ACTION = _`GUC_ACTION_PF2GUC_SAVE_RESTORE_VF` = 0x550B |
+ * +---+-------+--------------------------------------------------------------+
+ * | 1 | 31:0 | **VFID** - VF identifier |
+ * +---+-------+--------------------------------------------------------------+
+ * | 2 | 31:0 | **ADDR_LO** - lower 32-bits of GGTT offset to the buffer |
+ * | | | where the VF info will be saved to or restored from. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 3 | 31:0 | **ADDR_HI** - upper 32-bits of GGTT offset to the buffer |
+ * | | | where the VF info will be saved to or restored from. |
+ * +---+-------+--------------------------------------------------------------+
+ * | 4 | 27:0 | **SIZE** - size of the buffer (in dwords) |
+ * | +-------+--------------------------------------------------------------+
+ * | | 31:28 | MBZ |
+ * +---+-------+--------------------------------------------------------------+
+ *
+ * +---+-------+--------------------------------------------------------------+
+ * | | Bits | Description |
+ * +===+=======+==============================================================+
+ * | 0 | 31 | ORIGIN = GUC_HXG_ORIGIN_GUC_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_ |
+ * | +-------+--------------------------------------------------------------+
+ * | | 27:0 | DATA0 = **USED** - size of used buffer space (in dwords) |
+ * +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_PF2GUC_SAVE_RESTORE_VF 0x550Bu
+
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN (GUC_HXG_EVENT_MSG_MIN_LEN + 4u)
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE GUC_HXG_EVENT_MSG_0_DATA0
+#define GUC_PF_OPCODE_VF_SAVE 0u
+#define GUC_PF_OPCODE_VF_RESTORE 1u
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI GUC_HXG_EVENT_MSG_n_DATAn
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE (0xfffffffu << 0)
+#define PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_MBZ (0xfu << 28)
+
+#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_LEN GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define PF2GUC_SAVE_RESTORE_VF_RESPONSE_MSG_0_USED GUC_HXG_RESPONSE_MSG_0_DATA0
+
#endif
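
A sketch of how a PF driver could build the request described in the PF2GUC_SAVE_RESTORE_VF table above; illustrative only, not part of this patch. The generic GUC_HXG_* message fields are assumed from guc_messages_abi.h, and FIELD_PREP/lower_32_bits come from the usual kernel headers:

    static void xe_example_build_save_restore(u32 *msg, bool save, u32 vfid,
                                              u64 buf_ggtt_addr, u32 ndwords)
    {
            u32 opcode = save ? GUC_PF_OPCODE_VF_SAVE : GUC_PF_OPCODE_VF_RESTORE;

            msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
                     FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
                     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_DATA0, opcode) |
                     FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
                                GUC_ACTION_PF2GUC_SAVE_RESTORE_VF);
            msg[1] = vfid;                                  /* VFID */
            msg[2] = lower_32_bits(buf_ggtt_addr);          /* ADDR_LO */
            msg[3] = upper_32_bits(buf_ggtt_addr);          /* ADDR_HI */
            msg[4] = FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords);
    }
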
diff --git a/drivers/gpu/drm/xe/abi/guc_capture_abi.h b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
new file mode 100644
index 000000000000..dd4117553739
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_capture_abi.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_CAPTURE_ABI_H
+#define _ABI_GUC_CAPTURE_ABI_H
+
+#include <linux/types.h>
+
+/* Capture List Index */
+enum guc_capture_list_index_type {
+ GUC_CAPTURE_LIST_INDEX_PF = 0,
+ GUC_CAPTURE_LIST_INDEX_VF = 1,
+};
+
+#define GUC_CAPTURE_LIST_INDEX_MAX (GUC_CAPTURE_LIST_INDEX_VF + 1)
+
+/* Register-types of GuC capture register lists */
+enum guc_state_capture_type {
+ GUC_STATE_CAPTURE_TYPE_GLOBAL = 0,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE
+};
+
+#define GUC_STATE_CAPTURE_TYPE_MAX (GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE + 1)
+
+/* Class indices for capture_class and capture_instance arrays */
+enum guc_capture_list_class_type {
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE = 0,
+ GUC_CAPTURE_LIST_CLASS_VIDEO = 1,
+ GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE = 2,
+ GUC_CAPTURE_LIST_CLASS_BLITTER = 3,
+ GUC_CAPTURE_LIST_CLASS_GSC_OTHER = 4,
+};
+
+#define GUC_CAPTURE_LIST_CLASS_MAX (GUC_CAPTURE_LIST_CLASS_GSC_OTHER + 1)
+
+/**
+ * struct guc_mmio_reg - GuC MMIO reg state struct
+ *
+ * GuC MMIO reg state struct
+ */
+struct guc_mmio_reg {
+ /** @offset: MMIO Offset - filled in by Host */
+ u32 offset;
+ /** @value: MMIO Value - Used by Firmware to store value */
+ u32 value;
+ /** @flags: Flags for accessing the MMIO */
+ u32 flags;
+ /** @mask: Value of a mask to apply if mask with value is set */
+ u32 mask;
+#define GUC_REGSET_MASKED BIT(0)
+#define GUC_REGSET_STEERING_NEEDED BIT(1)
+#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
+#define GUC_REGSET_RESTORE_ONLY BIT(3)
+#define GUC_REGSET_STEERING_GROUP GENMASK(16, 12)
+#define GUC_REGSET_STEERING_INSTANCE GENMASK(23, 20)
+} __packed;
+
+/**
+ * struct guc_mmio_reg_set - GuC register sets
+ *
+ * GuC register sets
+ */
+struct guc_mmio_reg_set {
+ /** @address: register address */
+ u32 address;
+ /** @count: register count */
+ u16 count;
+ /** @reserved: reserved */
+ u16 reserved;
+} __packed;
+
+/**
+ * struct guc_debug_capture_list_header - Debug capture list header.
+ *
+ * Debug capture list header.
+ */
+struct guc_debug_capture_list_header {
+ /** @info: contains number of MMIO descriptors in the capture list. */
+ u32 info;
+#define GUC_CAPTURELISTHDR_NUMDESCR GENMASK(15, 0)
+} __packed;
+
+/**
+ * struct guc_debug_capture_list - Debug capture list
+ *
+ * As part of ADS registration, these header structures (followed by
+ * an array of 'struct guc_mmio_reg' entries) are used to register with
+ * GuC microkernel the list of registers we want it to dump out prior
+ * to an engine reset.
+ */
+struct guc_debug_capture_list {
+ /** @header: Debug capture list header. */
+ struct guc_debug_capture_list_header header;
+ /** @regs: MMIO descriptors in the capture list. */
+ struct guc_mmio_reg regs[];
+} __packed;
+
+/**
+ * struct guc_state_capture_header_t - State capture header.
+ *
+ * Prior to resetting engines that have hung or faulted, GuC microkernel
+ * reports the engine error-state (register values that were read) by
+ * logging them into the shared GuC log buffer using this hierarchy
+ * of structures.
+ */
+struct guc_state_capture_header_t {
+ /**
+ * @owner: VFID
+ * BR[ 7: 0] MBZ when SRIOV is disabled. When SRIOV is enabled
+ * VFID is an integer in range [0, 63] where 0 means the state capture
+ * is corresponding to the PF and an integer N in range [1, 63] means
+ * the state capture is for VF N.
+ */
+ u32 owner;
+#define GUC_STATE_CAPTURE_HEADER_VFID GENMASK(7, 0)
+ /** @info: Engine class/instance and capture type info */
+ u32 info;
+#define GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE GENMASK(3, 0) /* see guc_state_capture_type */
+#define GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS GENMASK(7, 4) /* see guc_capture_list_class_type */
+#define GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE GENMASK(11, 8)
+ /**
+ * @lrca: logical ring context address.
+ * if type-instance, LRCA (address) that hung, else set to ~0
+ */
+ u32 lrca;
+ /**
+ * @guc_id: context_index.
+ * if type-instance, context index of hung context, else set to ~0
+ */
+ u32 guc_id;
+ /** @num_mmio_entries: Number of captured MMIO entries. */
+ u32 num_mmio_entries;
+#define GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES GENMASK(9, 0)
+} __packed;
+
+/**
+ * struct guc_state_capture_t - State capture.
+ *
+ * State capture
+ */
+struct guc_state_capture_t {
+ /** @header: State capture header. */
+ struct guc_state_capture_header_t header;
+ /** @mmio_entries: Array of captured guc_mmio_reg entries. */
+ struct guc_mmio_reg mmio_entries[];
+} __packed;
+
+/* State Capture Group Type */
+enum guc_state_capture_group_type {
+ GUC_STATE_CAPTURE_GROUP_TYPE_FULL = 0,
+ GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL
+};
+
+#define GUC_STATE_CAPTURE_GROUP_TYPE_MAX (GUC_STATE_CAPTURE_GROUP_TYPE_PARTIAL + 1)
+
+/**
+ * struct guc_state_capture_group_header_t - State capture group header
+ *
+ * State capture group header.
+ */
+struct guc_state_capture_group_header_t {
+ /** @owner: VFID */
+ u32 owner;
+#define GUC_STATE_CAPTURE_GROUP_HEADER_VFID GENMASK(7, 0)
+ /** @info: Engine class/instance and capture type info */
+ u32 info;
+#define GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES GENMASK(7, 0)
+#define GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE GENMASK(15, 8)
+} __packed;
+
+/**
+ * struct guc_state_capture_group_t - State capture group.
+ *
+ * this is the top level structure where an error-capture dump starts
+ */
+struct guc_state_capture_group_t {
+ /** @grp_header: State capture group header. */
+ struct guc_state_capture_group_header_t grp_header;
+ /** @capture_entries: Array of state captures */
+ struct guc_state_capture_t capture_entries[];
+} __packed;
+
+#endif
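
To show how the capture structures above are meant to be consumed, a minimal sketch of decoding one state-capture entry pulled out of the GuC log capture region; illustrative only, not part of this patch (FIELD_GET from <linux/bitfield.h>, pr_debug from <linux/printk.h>):

    static void xe_example_decode_capture(const struct guc_state_capture_t *cap)
    {
            u32 info = cap->header.info;
            u32 type = FIELD_GET(GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE, info);
            u32 class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS, info);
            u32 inst = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE, info);
            u32 num = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES,
                                cap->header.num_mmio_entries);
            u32 i;

            for (i = 0; i < num; i++)
                    pr_debug("type %u class %u instance %u: reg %#x = %#x\n",
                             type, class, inst,
                             cap->mmio_entries[i].offset,
                             cap->mmio_entries[i].value);
    }
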
diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
index 8f86a16dc577..f58198cf2cf6 100644
--- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h
@@ -52,6 +52,7 @@ struct guc_ct_buffer_desc {
#define GUC_CTB_STATUS_OVERFLOW (1 << 0)
#define GUC_CTB_STATUS_UNDERFLOW (1 << 1)
#define GUC_CTB_STATUS_MISMATCH (1 << 2)
+#define GUC_CTB_STATUS_DISABLED (1 << 3)
u32 reserved[13];
} __packed;
static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 6b30743a2f6c..7de8f827281f 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -132,7 +132,7 @@ enum {
* _`GUC_KLV_VGT_POLICY_SCHED_IF_IDLE` : 0x8001
* This config sets whether strict scheduling is enabled whereby any VF
* that doesn’t have work to submit is still allocated a fixed execution
- * time-slice to ensure active VFs execution is always consitent even
+ * time-slice to ensure active VFs execution is always consistent even
* during other VF reprovisiong / rebooting events. Changing this KLV
* impacts all VFs and takes effect on the next VF-Switch event.
*
@@ -207,7 +207,7 @@ enum {
* of and this will never be perfectly-exact (accumulated nano-second
* granularity) since the GPUs clock time runs off a different crystal
* from the CPUs clock. Changing this KLV on a VF that is currently
- * running a context wont take effect until a new context is scheduled in.
+ * running a context won't take effect until a new context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
* infinitely long compute or shader kernel. In such a scenario, the
@@ -227,7 +227,7 @@ enum {
* HW is capable and this will never be perfectly-exact (accumulated
* nano-second granularity) since the GPUs clock time runs off a
* different crystal from the CPUs clock. Changing this KLV on a VF
- * that is currently running a context wont take effect until a new
+ * that is currently running a context won't take effect until a new
* context is scheduled in.
* That said, when the PF is changing this value from 0x0 to
* a non-zero value, it might never take effect if the VF is running an
@@ -291,6 +291,14 @@ enum {
*
* :0: (default)
* :1-65535: number of contexts (Gen12)
+ *
+ * _`GUC_KLV_VF_CFG_SCHED_PRIORITY` : 0x8A0C
+ * This config controls VF’s scheduling priority.
+ *
+ * :0: LOW = schedule VF only if it has active work (default)
+ * :1: NORMAL = schedule VF always, irrespective of whether it has work or not
+ * :2: HIGH = schedule VF in the next time-slice after current active
+ * time-slice completes if it has active work
*/
#define GUC_KLV_VF_CFG_GGTT_START_KEY 0x0001
@@ -343,6 +351,12 @@ enum {
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY 0x8a0b
#define GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_LEN 1u
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY 0x8a0c
+#define GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN 1u
+#define GUC_SCHED_PRIORITY_LOW 0u
+#define GUC_SCHED_PRIORITY_NORMAL 1u
+#define GUC_SCHED_PRIORITY_HIGH 2u
+
/*
* Workaround keys:
*/
@@ -352,6 +366,8 @@ enum xe_guc_klv_ids {
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
+ GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
};
#endif
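
A sketch of how the new scheduling-priority KLV would be encoded into a config buffer; illustrative only, not part of this patch. GUC_KLV_0_KEY and GUC_KLV_0_LEN are the generic KLV header fields defined earlier in this file:

    static u32 xe_example_encode_sched_priority(u32 *klvs, u32 priority)
    {
            /* priority is one of GUC_SCHED_PRIORITY_{LOW,NORMAL,HIGH} */
            klvs[0] = FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY) |
                      FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN);
            klvs[1] = priority;

            return 1 + GUC_KLV_VF_CFG_SCHED_PRIORITY_LEN;   /* dwords written */
    }
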
diff --git a/drivers/gpu/drm/xe/abi/guc_log_abi.h b/drivers/gpu/drm/xe/abi/guc_log_abi.h
new file mode 100644
index 000000000000..554630b7ccd9
--- /dev/null
+++ b/drivers/gpu/drm/xe/abi/guc_log_abi.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _ABI_GUC_LOG_ABI_H
+#define _ABI_GUC_LOG_ABI_H
+
+#include <linux/types.h>
+
+/* GuC logging buffer types */
+enum guc_log_buffer_type {
+ GUC_LOG_BUFFER_CRASH_DUMP,
+ GUC_LOG_BUFFER_DEBUG,
+ GUC_LOG_BUFFER_CAPTURE,
+};
+
+#define GUC_LOG_BUFFER_TYPE_MAX 3
+
+/**
+ * struct guc_log_buffer_state - GuC log buffer state
+ *
+ * Below state structure is used for coordination of retrieval of GuC firmware
+ * logs. Separate state is maintained for each log buffer type.
+ * read_ptr points to the location where Xe read last in log buffer and
+ * is read only for GuC firmware. write_ptr is incremented by GuC with number
+ * of bytes written for each log entry and is read only for Xe.
+ * When any type of log buffer becomes half full, GuC sends a flush interrupt.
+ * GuC firmware expects that while it is writing to 2nd half of the buffer,
+ * first half would get consumed by Host and then get a flush completed
+ * acknowledgment from Host, so that it does not end up doing any overwrite
+ * causing loss of logs. So when buffer gets half filled & Xe has requested
+ * for interrupt, GuC will set flush_to_file field, set the sampled_write_ptr
+ * to the value of write_ptr and raise the interrupt.
+ * On receiving the interrupt Xe should read the buffer, clear flush_to_file
+ * field and also update read_ptr with the value of sampled_write_ptr, before
+ * sending an acknowledgment to GuC. marker & version fields are for internal
+ * usage of GuC and opaque to Xe. buffer_full_cnt field is incremented every
+ * time GuC detects the log buffer overflow.
+ */
+struct guc_log_buffer_state {
+ /** @marker: buffer state start marker */
+ u32 marker[2];
+ /** @read_ptr: the last byte offset that was read by KMD previously */
+ u32 read_ptr;
+ /**
+ * @write_ptr: the next byte offset location that will be written by
+ * GuC
+ */
+ u32 write_ptr;
+ /** @size: Log buffer size */
+ u32 size;
+ /**
+ * @sampled_write_ptr: Log buffer write pointer
+ * This is written by GuC to the byte offset of the next free entry in
+ * the buffer on log buffer half full or state capture notification
+ */
+ u32 sampled_write_ptr;
+ /**
+ * @wrap_offset: wraparound offset
+ * This is the byte offset of location 1 byte after last valid GuC log
+ * event entry written by GuC firmware before there was a wraparound.
+ * This field is updated by GuC firmware and should be used by Host
+ * when copying buffer contents to file.
+ */
+ u32 wrap_offset;
+ /** @flags: Flush to file flag and buffer full count */
+ u32 flags;
+#define GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE GENMASK(0, 0)
+#define GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT GENMASK(4, 1)
+ /** @version: The GuC-Log-Entry format version */
+ u32 version;
+} __packed;
+
+#endif
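
The flush handshake described in the comment above boils down to a few steps on the host side. A minimal sketch, illustrative only and not part of this patch; the actual copy of the log data and the G2H acknowledgment are elided:

    static void xe_example_handle_log_flush(struct guc_log_buffer_state *state)
    {
            u32 write = READ_ONCE(state->sampled_write_ptr);

            if (!FIELD_GET(GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE, state->flags))
                    return;

            /* ... copy [state->read_ptr, write) out of the log buffer ... */

            state->read_ptr = write;        /* report how far we consumed */
            state->flags &= ~(u32)GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE;
            /* ... then send the flush-complete acknowledgment to GuC ... */
    }
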
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h
deleted file mode 100644
index 710cecca972d..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Empty */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h
deleted file mode 100644
index 650ea2803a97..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _I915_GEM_MMAN_H_
-#define _I915_GEM_MMAN_H_
-
-#include "xe_bo_types.h"
-#include <drm/drm_prime.h>
-
-static inline int i915_gem_fb_mmap(struct xe_bo *bo, struct vm_area_struct *vma)
-{
- return drm_gem_prime_mmap(&bo->ttm.base, vma);
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
index 777c20ceabab..8a048980ea38 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -1,64 +1,15 @@
/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
+/* Copyright © 2025 Intel Corporation */
-#ifndef _I915_GEM_OBJECT_H_
-#define _I915_GEM_OBJECT_H_
+#ifndef __I915_GEM_OBJECT_H__
+#define __I915_GEM_OBJECT_H__
-#include <linux/types.h>
+struct dma_fence;
+struct i915_sched_attr;
-#include "xe_bo.h"
-
-#define i915_gem_object_is_shmem(obj) (0) /* We don't use shmem */
-
-static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n)
-{
- /* Should never be called */
- WARN_ON(1);
- return n;
-}
-
-static inline bool i915_gem_object_is_tiled(const struct xe_bo *bo)
-{
- /* legacy tiling is unused */
- return false;
-}
-
-static inline bool i915_gem_object_is_userptr(const struct xe_bo *bo)
+static inline void i915_gem_fence_wait_priority(struct dma_fence *fence,
+ const struct i915_sched_attr *attr)
{
- /* legacy tiling is unused */
- return false;
-}
-
-static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
- u32 ofs, u64 *ptr, u32 size)
-{
- struct ttm_bo_kmap_obj map;
- void *src;
- bool is_iomem;
- int ret;
-
- ret = xe_bo_lock(bo, true);
- if (ret)
- return ret;
-
- ret = ttm_bo_kmap(&bo->ttm, ofs >> PAGE_SHIFT, 1, &map);
- if (ret)
- goto out_unlock;
-
- ofs &= ~PAGE_MASK;
- src = ttm_kmap_obj_virtual(&map, &is_iomem);
- src += ofs;
- if (is_iomem)
- memcpy_fromio(ptr, (void __iomem *)src, size);
- else
- memcpy(ptr, src, size);
-
- ttm_bo_kunmap(&map);
-out_unlock:
- xe_bo_unlock(bo);
- return ret;
}
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h
deleted file mode 100644
index 2a3f12d2978c..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_frontbuffer.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _I915_GEM_OBJECT_FRONTBUFFER_H_
-#define _I915_GEM_OBJECT_FRONTBUFFER_H_
-
-#define i915_gem_object_get_frontbuffer(obj) NULL
-#define i915_gem_object_set_frontbuffer(obj, front) (front)
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h
deleted file mode 100644
index 7d6bb1abab73..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object_types.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/* Copyright © 2024 Intel Corporation */
-
-#ifndef __I915_GEM_OBJECT_TYPES_H__
-#define __I915_GEM_OBJECT_TYPES_H__
-
-#include "xe_bo.h"
-
-#define to_intel_bo(x) gem_to_xe_bo((x))
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index cb6c7598824b..41d39d67817a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -1,3 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
#ifndef _I915_GEM_STOLEN_H_
#define _I915_GEM_STOLEN_H_
@@ -29,7 +34,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
NULL, size, start, end,
- ttm_bo_type_kernel, flags);
+ ttm_bo_type_kernel, flags, 0);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
bo = NULL;
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h b/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
deleted file mode 100644
index 21fec9cc837c..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RPS_H__
-#define __INTEL_RPS_H__
-
-#define gen5_rps_irq_handler(x) ({})
-
-#endif /* __INTEL_RPS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h
deleted file mode 100644
index b4c47617b64b..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __I915_DEBUGFS_H__
-#define __I915_DEBUGFS_H__
-
-struct drm_i915_gem_object;
-struct seq_file;
-
-static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
-
-#endif /* __I915_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index f27a2c75b56d..9b7572e06f34 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -13,7 +13,7 @@
#include <drm/drm_drv.h>
#include "i915_utils.h"
-#include "intel_runtime_pm.h"
+#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -21,28 +21,12 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
+/* compat platform checks only for soc/ usage */
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
-#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
-#define IS_I830(dev_priv) (dev_priv && 0)
-#define IS_I845G(dev_priv) (dev_priv && 0)
-#define IS_I85X(dev_priv) (dev_priv && 0)
-#define IS_I865G(dev_priv) (dev_priv && 0)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
-#define IS_I945G(dev_priv) (dev_priv && 0)
-#define IS_I945GM(dev_priv) (dev_priv && 0)
-#define IS_I965G(dev_priv) (dev_priv && 0)
-#define IS_I965GM(dev_priv) (dev_priv && 0)
-#define IS_G45(dev_priv) (dev_priv && 0)
-#define IS_GM45(dev_priv) (dev_priv && 0)
-#define IS_G4X(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_G33(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
-#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
-#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
-#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
@@ -66,56 +50,14 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE)
+#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE)
#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)
-
-#define IP_VER(ver, rel) ((ver) << 8 | (rel))
#define IS_MOBILE(xe) (xe && 0)
-#define IS_LP(xe) ((xe) && 0)
-#define IS_GEN9_LP(xe) ((xe) && 0)
-#define IS_GEN9_BC(xe) ((xe) && 0)
-
-#define IS_TIGERLAKE_UY(xe) (xe && 0)
-#define IS_COMETLAKE_ULX(xe) (xe && 0)
-#define IS_COFFEELAKE_ULX(xe) (xe && 0)
-#define IS_KABYLAKE_ULX(xe) (xe && 0)
-#define IS_SKYLAKE_ULX(xe) (xe && 0)
-#define IS_HASWELL_ULX(xe) (xe && 0)
-#define IS_COMETLAKE_ULT(xe) (xe && 0)
-#define IS_COFFEELAKE_ULT(xe) (xe && 0)
-#define IS_KABYLAKE_ULT(xe) (xe && 0)
-#define IS_SKYLAKE_ULT(xe) (xe && 0)
-
-#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
-#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
-#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
-#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
-#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-
#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-#define I915_PRIORITY_DISPLAY 0
-struct i915_sched_attr {
- int priority;
-};
-#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
-
-#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
-
-#ifdef CONFIG_ARM64
-/*
- * arm64 indirectly includes linux/rtc.h,
- * which defines a irq_lock, so include it
- * here before #define-ing it
- */
-#include <linux/rtc.h>
-#endif
-
-#define irq_lock irq.lock
-
#endif
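
The compat i915_drv.h above keeps only the platform macros that shared soc/display code still references, and the pre-Xe ones deliberately expand to "(dev_priv && 0)": the argument is still evaluated, so it must be a valid expression and is not flagged as unused, while the branch folds to constant false and gets dropped by the compiler. A minimal standalone sketch of that pattern, with invented fake_device/IS_OLD_PLATFORM names that are not part of the kernel:

#include <stdio.h>

/* Stand-in type; the real macros take a struct xe_device pointer. */
struct fake_device { int id; };

/*
 * Always false, but the argument is still evaluated, so shared code
 * that only ever calls this keeps compiling and the pointer it passes
 * is not reported as unused.
 */
#define IS_OLD_PLATFORM(dev) ((dev) && 0)

int main(void)
{
	struct fake_device dev_data = { .id = 1 };
	struct fake_device *dev = &dev_data;

	if (IS_OLD_PLATFORM(dev))
		printf("legacy i915 path\n");	/* dead code, folded away */
	else
		printf("xe path\n");

	return 0;
}
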
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h
deleted file mode 100644
index 98e9dd78f670..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _I915_GPU_ERROR_H_
-#define _I915_GPU_ERROR_H_
-
-struct drm_i915_error_state_buf;
-
-__printf(2, 3)
-static inline void
-i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h
new file mode 100644
index 000000000000..b261910cd6f9
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_gtt_view_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#include "../../i915/i915_gtt_view_types.h"
+
+/* Partial view not supported in xe, fail build if used. */
+#define I915_GTT_VIEW_PARTIAL
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
new file mode 100644
index 000000000000..c11130440d31
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_SCHEDULER_TYPES_H__
+#define __I915_SCHEDULER_TYPES_H__
+
+#define I915_PRIORITY_DISPLAY 0
+
+struct i915_sched_attr {
+ int priority;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
index bdae8392e125..4465c40f8134 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -10,6 +10,8 @@
#include "xe_ggtt_types.h"
+#include <linux/refcount.h>
+
/* We don't want these from i915_drm.h in case of Xe */
#undef I915_TILING_X
#undef I915_TILING_Y
@@ -19,6 +21,7 @@
struct xe_bo;
struct i915_vma {
+ refcount_t ref;
struct xe_bo *bo, *dpt;
struct xe_ggtt_node *node;
};
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
deleted file mode 100644
index e7aaf50f5485..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include <linux/types.h>
-#include <linux/build_bug.h>
-
-/* XX: Figure out how to handle this vma mapping in xe */
-struct intel_remapped_plane_info {
- /* in gtt pages */
- u32 offset:31;
- u32 linear:1;
- union {
- /* in gtt pages for !linear */
- struct {
- u16 width;
- u16 height;
- u16 src_stride;
- u16 dst_stride;
- };
-
- /* in gtt pages for linear */
- u32 size;
- };
-} __packed;
-
-struct intel_remapped_info {
- struct intel_remapped_plane_info plane[4];
- /* in gtt pages */
- u32 plane_alignment;
-} __packed;
-
-struct intel_rotation_info {
- struct intel_remapped_plane_info plane[2];
-} __packed;
-
-enum i915_gtt_view_type {
- I915_GTT_VIEW_NORMAL = 0,
- I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
- I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
-};
-
-static inline void assert_i915_gem_gtt_types(void)
-{
- BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));
-
- /* Check that rotation/remapped shares offsets for simplicity */
- BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
- offsetof(struct intel_rotation_info, plane[0]));
- BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
- offsetofend(struct intel_rotation_info, plane[1]));
-
- /* As we encode the size of each branch inside the union into its type,
- * we have to be careful that each branch has a unique size.
- */
- switch ((enum i915_gtt_view_type)0) {
- case I915_GTT_VIEW_NORMAL:
- case I915_GTT_VIEW_ROTATED:
- case I915_GTT_VIEW_REMAPPED:
- /* gcc complains if these are identical cases */
- break;
- }
-}
-
-struct i915_gtt_view {
- enum i915_gtt_view_type type;
- union {
- /* Members need to contain no holes/padding */
- struct intel_rotation_info rotated;
- struct intel_remapped_info remapped;
- };
-};
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
deleted file mode 100644
index 8c7b315aa8ac..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_RUNTIME_PM_H__
-#define __INTEL_RUNTIME_PM_H__
-
-#include "intel_wakeref.h"
-#include "xe_device_types.h"
-#include "xe_pm.h"
-
-#define intel_runtime_pm xe_runtime_pm
-
-static inline void disable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline void enable_rpm_wakeref_asserts(void *rpm)
-{
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_resume_and_get(xe);
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- return xe_pm_runtime_get_if_in_use(xe);
-}
-
-static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_get_noresume(xe);
- return true;
-}
-
-static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
-{
- struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);
-
- xe_pm_runtime_put(xe);
-}
-
-static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref)
-{
- if (wakeref)
- intel_runtime_pm_put_unchecked(pm);
-}
-
-#define intel_runtime_pm_get_raw intel_runtime_pm_get
-#define intel_runtime_pm_put_raw intel_runtime_pm_put
-#define assert_rpm_wakelock_held(x) do { } while (0)
-#define assert_rpm_raw_wakeref_held(x) do { } while (0)
-
-#define with_intel_runtime_pm(rpm, wf) \
- for ((wf) = intel_runtime_pm_get(rpm); (wf); \
- intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index eb5b5f0e4bd9..0c1e88e36a1e 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -10,11 +10,18 @@
#include "xe_device_types.h"
#include "xe_mmio.h"
-static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
+#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
+
+static inline struct intel_uncore *to_intel_uncore(struct drm_device *drm)
+{
+ return &to_xe_device(drm)->uncore;
+}
+
+static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncore)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
- return xe_root_mmio_gt(xe);
+ return xe_root_tile_mmio(xe);
}
static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
@@ -29,7 +36,7 @@ static inline u32 intel_uncore_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
@@ -37,7 +44,7 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}
static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
@@ -45,7 +52,7 @@ static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read16(__compat_uncore_to_mmio(uncore), reg);
}
static inline u64
@@ -57,11 +64,11 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore,
u32 upper, lower, old_upper;
int loop = 0;
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
do {
old_upper = upper;
- lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg);
- upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg);
+ lower = xe_mmio_read32(__compat_uncore_to_mmio(uncore), lower_reg);
+ upper = xe_mmio_read32(__compat_uncore_to_mmio(uncore), upper_reg);
} while (upper != old_upper && loop++ < 2);
return (u64)upper << 32 | lower;
@@ -72,7 +79,7 @@ static inline void intel_uncore_posting_read(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write(struct intel_uncore *uncore,
@@ -80,7 +87,7 @@ static inline void intel_uncore_write(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
@@ -88,7 +95,7 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set);
+ return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}
static inline int intel_wait_for_register(struct intel_uncore *uncore,
@@ -97,7 +104,7 @@ static inline int intel_wait_for_register(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -107,7 +114,7 @@ static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
timeout * USEC_PER_MSEC, NULL, false);
}
@@ -117,10 +124,19 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
unsigned int slow_timeout_ms, u32 *out_value)
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+ bool atomic;
- return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value,
+ /*
+ * Replicate the behavior from i915 here, in which sleep is not
+ * performed if slow_timeout_ms == 0. This is necessary because
+ * of some paths in display code where waits are done in atomic
+ * context.
+ */
+ atomic = !slow_timeout_ms && fast_timeout_us > 0;
+
+ return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
fast_timeout_us + 1000 * slow_timeout_ms,
- out_value, false);
+ out_value, atomic);
}
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
@@ -128,7 +144,7 @@ static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
@@ -136,7 +152,7 @@ static inline void intel_uncore_write_fw(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
@@ -144,7 +160,7 @@ static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg);
+ return xe_mmio_read32(__compat_uncore_to_mmio(uncore), reg);
}
static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
@@ -152,33 +168,9 @@ static inline void intel_uncore_write_notrace(struct intel_uncore *uncore,
{
struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
- xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+ xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
}
-static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
-{
- struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
-
- return xe_device_get_root_tile(xe)->mmio.regs;
-}
-
-/*
- * The raw_reg_{read,write} macros are intended as a micro-optimization for
- * interrupt handlers so that the pointer indirection on uncore->regs can
- * be computed once (and presumably cached in a register) instead of generating
- * extra load instructions for each MMIO access.
- *
- * Given that these macros are only intended for non-GSI interrupt registers
- * (and the goal is to avoid extra instructions generated by the compiler),
- * these macros do not account for uncore->gsi_offset. Any caller that needs
- * to use these macros on a GSI register is responsible for adding the
- * appropriate GSI offset to the 'base' parameter.
- */
-#define raw_reg_read(base, reg) \
- readl(base + i915_mmio_reg_offset(reg))
-#define raw_reg_write(base, reg, value) \
- writel(value, base + i915_mmio_reg_offset(reg))
-
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)
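
The intel_uncore.h changes above are largely mechanical (the helpers now resolve to a struct xe_mmio instead of a GT), but two details carry real logic: __intel_wait_for_register() only allows sleeping when slow_timeout_ms is non-zero, because some display paths wait in atomic context, and intel_uncore_read64_2x32() re-reads the upper half so the combined 64-bit value stays consistent across two 32-bit reads. A standalone userspace sketch of that second pattern, with fake_* helpers standing in for the MMIO accessors (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Simulated 32-bit register halves of a 64-bit counter. */
static uint32_t fake_lo = 0xffffffffu, fake_hi = 0x12345677u;

static uint32_t read32_lo(void) { return fake_lo; }
static uint32_t read32_hi(void) { return fake_hi; }

static uint64_t read64_2x32(void)
{
	uint32_t upper, lower, old_upper;
	int loop = 0;

	/* Re-read the high half; retry (bounded) if it moved under us. */
	upper = read32_hi();
	do {
		old_upper = upper;
		lower = read32_lo();
		upper = read32_hi();
	} while (upper != old_upper && loop++ < 2);

	return (uint64_t)upper << 32 | lower;
}

int main(void)
{
	printf("counter: 0x%016llx\n", (unsigned long long)read64_2x32());
	return 0;
}
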
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
index d429d421ac70..d429d421ac70 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore_trace.h
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
index ecb1c0707706..2a32faea9db5 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
@@ -5,4 +5,6 @@
#include <linux/types.h>
-typedef unsigned long intel_wakeref_t;
+typedef struct ref_tracker *intel_wakeref_t;
+
+#define INTEL_WAKEREF_DEF ERR_PTR(-ENOENT)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
index c2c30ece8f77..97fd0ddf0b3a 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
@@ -9,20 +9,21 @@
#include <linux/errno.h>
#include <linux/types.h>
-struct drm_i915_gem_object;
-struct intel_pxp;
+#include "xe_pxp.h"
-static inline int intel_pxp_key_check(struct intel_pxp *pxp,
- struct drm_i915_gem_object *obj,
- bool assign)
-{
- return -ENODEV;
-}
+struct drm_gem_object;
-static inline bool
-i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
+static inline int intel_pxp_key_check(struct drm_gem_object *obj, bool assign)
{
- return false;
+ /*
+ * The assign variable is used in i915 to assign the key to the BO at
+ * first submission time. In Xe the key is instead assigned at BO
+ * creation time, so the assign variable must always be false.
+ */
+ if (assign)
+ return -EINVAL;
+
+ return xe_pxp_obj_key_check(obj);
}
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
deleted file mode 100644
index 9c46556d33a4..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../../i915/soc/intel_pch.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h
new file mode 100644
index 000000000000..05cbfb697b2b
--- /dev/null
+++ b/drivers/gpu/drm/xe/compat-i915-headers/soc/intel_rom.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "../../../i915/soc/intel_rom.h"
diff --git a/drivers/gpu/drm/xe/display/ext/i915_irq.c b/drivers/gpu/drm/xe/display/ext/i915_irq.c
index eb40f1cb44f6..3c6bca66ddab 100644
--- a/drivers/gpu/drm/xe/display/ext/i915_irq.c
+++ b/drivers/gpu/drm/xe/display/ext/i915_irq.c
@@ -7,25 +7,24 @@
#include "i915_reg.h"
#include "intel_uncore.h"
-void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
- i915_reg_t iir, i915_reg_t ier)
+void gen2_irq_reset(struct intel_uncore *uncore, struct i915_irq_regs regs)
{
- intel_uncore_write(uncore, imr, 0xffffffff);
- intel_uncore_posting_read(uncore, imr);
+ intel_uncore_write(uncore, regs.imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.imr);
- intel_uncore_write(uncore, ier, 0);
+ intel_uncore_write(uncore, regs.ier, 0);
/* IIR can theoretically queue up two events. Be paranoid. */
- intel_uncore_write(uncore, iir, 0xffffffff);
- intel_uncore_posting_read(uncore, iir);
- intel_uncore_write(uncore, iir, 0xffffffff);
- intel_uncore_posting_read(uncore, iir);
+ intel_uncore_write(uncore, regs.iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.iir);
+ intel_uncore_write(uncore, regs.iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.iir);
}
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
+void gen2_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
u32 val = intel_uncore_read(uncore, reg);
@@ -42,32 +41,42 @@ void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
intel_uncore_posting_read(uncore, reg);
}
-void gen3_irq_init(struct intel_uncore *uncore,
- i915_reg_t imr, u32 imr_val,
- i915_reg_t ier, u32 ier_val,
- i915_reg_t iir)
+void gen2_irq_init(struct intel_uncore *uncore, struct i915_irq_regs regs,
+ u32 imr_val, u32 ier_val)
{
- gen3_assert_iir_is_zero(uncore, iir);
+ gen2_assert_iir_is_zero(uncore, regs.iir);
- intel_uncore_write(uncore, ier, ier_val);
- intel_uncore_write(uncore, imr, imr_val);
- intel_uncore_posting_read(uncore, imr);
+ intel_uncore_write(uncore, regs.ier, ier_val);
+ intel_uncore_write(uncore, regs.imr, imr_val);
+ intel_uncore_posting_read(uncore, regs.imr);
+}
+
+void gen2_error_reset(struct intel_uncore *uncore, struct i915_error_regs regs)
+{
+ intel_uncore_write(uncore, regs.emr, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.emr);
+
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+}
+
+void gen2_error_init(struct intel_uncore *uncore, struct i915_error_regs regs,
+ u32 emr_val)
+{
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+ intel_uncore_write(uncore, regs.eir, 0xffffffff);
+ intel_uncore_posting_read(uncore, regs.eir);
+
+ intel_uncore_write(uncore, regs.emr, emr_val);
+ intel_uncore_posting_read(uncore, regs.emr);
}
bool intel_irqs_enabled(struct xe_device *xe)
{
- /*
- * XXX: i915 has a racy handling of the irq.enabled, since it doesn't
- * lock its transitions. Because of that, the irq.enabled sometimes
- * is not read with the irq.lock in place.
- * However, the most critical cases like vblank and page flips are
- * properly using the locks.
- * We cannot take the lock in here or run any kind of assert because
- * of i915 inconsistency.
- * But at this point the xe irq is better protected against races,
- * although the full solution would be protecting the i915 side.
- */
- return xe->irq.enabled;
+ return atomic_read(&xe->irq.enabled);
}
void intel_synchronize_irq(struct xe_device *xe)
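
The gen2_irq_reset()/gen2_irq_init() rework above bundles the IMR/IER/IIR triplet into struct i915_irq_regs, but the sequence itself is unchanged: mask everything, zero the enable register, then ack IIR twice because, as the comment notes, it can queue a second event behind the first. A standalone model of that sequence over plain variables (the struct and helper names here are invented, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct fake_irq_regs {
	uint32_t imr;		/* interrupt mask */
	uint32_t ier;		/* interrupt enable */
	uint32_t iir;		/* latched status, write-to-clear */
	uint32_t iir_pending;	/* second event queued behind the first */
};

static void iir_ack(struct fake_irq_regs *r)
{
	/* Clearing IIR lets the queued event pop in behind it. */
	r->iir = r->iir_pending;
	r->iir_pending = 0;
}

static void fake_irq_reset(struct fake_irq_regs *r)
{
	r->imr = 0xffffffff;	/* mask all sources */
	r->ier = 0;		/* disable delivery */
	iir_ack(r);		/* first clear */
	iir_ack(r);		/* paranoid second clear for the queued event */
}

int main(void)
{
	struct fake_irq_regs regs = { .iir = 0x1, .iir_pending = 0x2 };

	fake_irq_reset(&regs);
	printf("iir after reset: 0x%x (expect 0)\n", regs.iir);
	return 0;
}
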
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
new file mode 100644
index 000000000000..27437c22bd70
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <drm/drm_gem.h>
+
+#include "xe_bo.h"
+#include "intel_bo.h"
+
+bool intel_bo_is_tiled(struct drm_gem_object *obj)
+{
+ /* legacy tiling is unused */
+ return false;
+}
+
+bool intel_bo_is_userptr(struct drm_gem_object *obj)
+{
+ /* xe does not have userptr bos */
+ return false;
+}
+
+bool intel_bo_is_shmem(struct drm_gem_object *obj)
+{
+ return false;
+}
+
+bool intel_bo_is_protected(struct drm_gem_object *obj)
+{
+ return xe_bo_is_protected(gem_to_xe_bo(obj));
+}
+
+void intel_bo_flush_if_display(struct drm_gem_object *obj)
+{
+}
+
+int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ return drm_gem_prime_mmap(obj, vma);
+}
+
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ return xe_bo_read(bo, offset, dst, size);
+}
+
+struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+{
+ return NULL;
+}
+
+struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
+ struct intel_frontbuffer *front)
+{
+ return front;
+}
+
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+ /* FIXME */
+}
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index 63ce97cc4cfe..ebdb22c9499d 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -11,8 +11,10 @@
#include "intel_fb_bo.h"
#include "xe_bo.h"
-void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
+void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
if (bo->flags & XE_BO_FLAG_PINNED) {
/* Unpin our kernel fb first */
xe_bo_lock(bo, false);
@@ -22,10 +24,11 @@ void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
xe_bo_put(bo);
}
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct xe_bo *bo,
+int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
+ struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
int ret;
@@ -47,10 +50,10 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
/*
* XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
* automatically set when creating FB. We cannot change caching
- * mode when the boect is VM_BINDed, so we can only set
+ * mode when the bo is VM_BINDed, so we can only set
* coherency with display when unbound.
*/
- if (XE_IOCTL_DBG(xe, !list_empty(&bo->ttm.base.gpuva.list))) {
+ if (XE_IOCTL_DBG(xe, xe_bo_is_vm_bound(bo))) {
ttm_bo_unreserve(&bo->ttm);
ret = -EINVAL;
goto err;
@@ -65,11 +68,12 @@ err:
return ret;
}
-struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_gem_object *bo;
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_bo *bo;
struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
if (!gem)
@@ -77,12 +81,12 @@ struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
bo = gem_to_xe_bo(gem);
/* Require vram placement or dma-buf import */
- if (IS_DGFX(i915) &&
- !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) &&
+ if (IS_DGFX(xe) &&
+ !xe_bo_can_migrate(bo, XE_PL_VRAM0) &&
bo->ttm.type != ttm_bo_type_sg) {
drm_gem_object_put(gem);
return ERR_PTR(-EREMOTE);
}
- return bo;
+ return gem;
}
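
intel_fb_bo_framebuffer_init()/intel_fb_bo_lookup_valid_bo() now work on drm_gem_object, but the checks stay the same: on discrete GPUs the BO must be migratable to VRAM or be a dma-buf import, and a BO that is already VM_BINDed is rejected because its caching mode can no longer be changed for scanout. A rough standalone model of those checks, using invented names and plain booleans rather than the real BO flags:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_fb_bo {
	bool can_migrate_vram;
	bool is_dmabuf_import;
	bool is_vm_bound;
};

static int fake_fb_bo_validate(bool is_dgfx, const struct fake_fb_bo *bo)
{
	if (is_dgfx && !bo->can_migrate_vram && !bo->is_dmabuf_import)
		return -EREMOTE;	/* wrong placement for scanout */

	if (bo->is_vm_bound)
		return -EINVAL;		/* can't change caching while bound */

	return 0;
}

int main(void)
{
	struct fake_fb_bo bo = { .can_migrate_vram = true };

	printf("validate: %d (0 means usable as fb)\n",
	       fake_fb_bo_validate(true, &bo));
	return 0;
}
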
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.h b/drivers/gpu/drm/xe/display/intel_fb_bo.h
deleted file mode 100644
index 5d365b925b7a..000000000000
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __INTEL_FB_BO_H__
-#define __INTEL_FB_BO_H__
-
-struct drm_file;
-struct drm_mode_fb_cmd2;
-struct drm_i915_private;
-struct intel_framebuffer;
-struct xe_bo;
-
-void intel_fb_bo_framebuffer_fini(struct xe_bo *bo);
-int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct xe_bo *bo,
- struct drm_mode_fb_cmd2 *mode_cmd);
-
-struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd);
-
-#endif
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 99499d6c0256..e8191562d122 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -6,6 +6,7 @@
#include <drm/drm_fb_helper.h>
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
#include "xe_ttm_stolen_mgr.h"
@@ -20,7 +21,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct xe_device *xe = to_xe_device(dev);
struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
+ struct xe_bo *obj;
int size;
/* we don't do packed 24bpp */
@@ -44,7 +45,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -55,7 +56,7 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_GGTT);
}
if (IS_ERR(obj)) {
@@ -64,13 +65,13 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
goto err;
}
- fb = intel_framebuffer_create(obj, &mode_cmd);
+ fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd);
if (IS_ERR(fb)) {
xe_bo_unpin_map_no_vm(obj);
goto err;
}
- drm_gem_object_put(intel_bo_to_drm_bo(obj));
+ drm_gem_object_put(&obj->ttm.base);
return to_intel_framebuffer(fb);
@@ -78,10 +79,11 @@ err:
return ERR_CAST(fb);
}
-int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
- struct drm_i915_gem_object *obj, struct i915_vma *vma)
+int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+ struct drm_gem_object *_obj, struct i915_vma *vma)
{
- struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ struct xe_bo *obj = gem_to_xe_bo(_obj);
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
@@ -100,7 +102,7 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
XE_WARN_ON(iosys_map_is_null(&obj->vmap));
info->screen_base = obj->vmap.vaddr_iomem;
- info->screen_size = intel_bo_to_drm_bo(obj)->size;
+ info->screen_size = obj->ttm.base.size;
return 0;
}
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 75736faf2a80..9f4ade25787a 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -4,16 +4,18 @@
*/
#include "xe_display.h"
-#include "regs/xe_regs.h"
+#include "regs/xe_irq_regs.h"
#include <linux/fb.h>
+#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
-#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
#include "intel_acpi.h"
#include "intel_audio.h"
#include "intel_bw.h"
@@ -22,19 +24,21 @@
#include "intel_display_irq.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
+#include "intel_dmc_wl.h"
#include "intel_dp.h"
#include "intel_encoder.h"
#include "intel_fbdev.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_opregion.h"
+#include "skl_watermark.h"
#include "xe_module.h"
/* Xe device functions */
static bool has_display(struct xe_device *xe)
{
- return HAS_DISPLAY(xe);
+ return HAS_DISPLAY(&xe->display);
}
/**
@@ -65,6 +69,10 @@ void xe_display_driver_set_hooks(struct drm_driver *driver)
if (!xe_modparam.probe_display)
return;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ driver->fbdev_probe = intel_fbdev_driver_fbdev_probe;
+#endif
+
driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
}
@@ -96,35 +104,13 @@ int xe_display_create(struct xe_device *xe)
spin_lock_init(&xe->display.fb_tracking.lock);
xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
+ if (!xe->display.hotplug.dp_wq)
+ return -ENOMEM;
return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
}
-static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
-{
- struct xe_device *xe = to_xe_device(dev);
-
- if (!xe->info.probe_display)
- return;
-
- intel_power_domains_cleanup(xe);
-}
-
-int xe_display_init_nommio(struct xe_device *xe)
-{
- if (!xe->info.probe_display)
- return 0;
-
- /* Fake uncore lock */
- spin_lock_init(&xe->uncore.lock);
-
- /* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(xe);
-
- return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe);
-}
-
-static void xe_display_fini_noirq(void *arg)
+static void xe_display_fini_early(void *arg)
{
struct xe_device *xe = arg;
struct intel_display *display = &xe->display;
@@ -132,11 +118,13 @@ static void xe_display_fini_noirq(void *arg)
if (!xe->info.probe_display)
return;
- intel_display_driver_remove_noirq(xe);
+ intel_display_driver_remove_nogem(display);
+ intel_display_driver_remove_noirq(display);
intel_opregion_cleanup(display);
+ intel_power_domains_cleanup(display);
}
-int xe_display_init_noirq(struct xe_device *xe)
+int xe_display_init_early(struct xe_device *xe)
{
struct intel_display *display = &xe->display;
int err;
@@ -144,7 +132,10 @@ int xe_display_init_noirq(struct xe_device *xe)
if (!xe->info.probe_display)
return 0;
- intel_display_driver_early_probe(xe);
+ /* Fake uncore lock */
+ spin_lock_init(&xe->uncore.lock);
+
+ intel_display_driver_early_probe(display);
/* Early display init.. */
intel_opregion_setup(display);
@@ -155,99 +146,86 @@ int xe_display_init_noirq(struct xe_device *xe)
*/
intel_dram_detect(xe);
- intel_bw_init_hw(xe);
+ intel_bw_init_hw(display);
- intel_display_device_info_runtime_init(xe);
+ intel_display_device_info_runtime_init(display);
- err = intel_display_driver_probe_noirq(xe);
- if (err) {
- intel_opregion_cleanup(display);
- return err;
- }
+ err = intel_display_driver_probe_noirq(display);
+ if (err)
+ goto err_opregion;
- return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
+ err = intel_display_driver_probe_nogem(display);
+ if (err)
+ goto err_noirq;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_early, xe);
+err_noirq:
+ intel_display_driver_remove_noirq(display);
+ intel_power_domains_cleanup(display);
+err_opregion:
+ intel_opregion_cleanup(display);
+ return err;
}
-static void xe_display_fini_noaccel(void *arg)
+static void xe_display_fini(void *arg)
{
struct xe_device *xe = arg;
+ struct intel_display *display = &xe->display;
- if (!xe->info.probe_display)
- return;
-
- intel_display_driver_remove_nogem(xe);
+ intel_hpd_poll_fini(display);
+ intel_hdcp_component_fini(display);
+ intel_audio_deinit(display);
+ intel_display_driver_remove(display);
}
-int xe_display_init_noaccel(struct xe_device *xe)
+int xe_display_init(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
int err;
if (!xe->info.probe_display)
return 0;
- err = intel_display_driver_probe_nogem(xe);
+ err = intel_display_driver_probe(display);
if (err)
return err;
- return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noaccel, xe);
-}
-
-int xe_display_init(struct xe_device *xe)
-{
- if (!xe->info.probe_display)
- return 0;
-
- return intel_display_driver_probe(xe);
-}
-
-void xe_display_fini(struct xe_device *xe)
-{
- if (!xe->info.probe_display)
- return;
-
- intel_hpd_poll_fini(xe);
-
- intel_hdcp_component_fini(xe);
- intel_audio_deinit(xe);
+ return devm_add_action_or_reset(xe->drm.dev, xe_display_fini, xe);
}
void xe_display_register(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_driver_register(xe);
- intel_register_dsm_handler();
- intel_power_domains_enable(xe);
+ intel_display_driver_register(display);
+ intel_power_domains_enable(display);
}
void xe_display_unregister(struct xe_device *xe)
{
- if (!xe->info.probe_display)
- return;
-
- intel_unregister_dsm_handler();
- intel_power_domains_disable(xe);
- intel_display_driver_unregister(xe);
-}
+ struct intel_display *display = &xe->display;
-void xe_display_driver_remove(struct xe_device *xe)
-{
if (!xe->info.probe_display)
return;
- intel_display_driver_remove(xe);
+ intel_power_domains_disable(display);
+ intel_display_driver_unregister(display);
}
/* IRQ-related functions */
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (master_ctl & DISPLAY_IRQ)
- gen11_display_irq_handler(xe);
+ gen11_display_irq_handler(display);
}
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
@@ -263,19 +241,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
void xe_display_irq_reset(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- gen11_display_irq_reset(xe);
+ gen11_display_irq_reset(display);
}
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(xe);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -308,22 +290,58 @@ static void xe_display_flush_cleanup_work(struct xe_device *xe)
}
}
-/* TODO: System and runtime suspend/resume sequences will be sanitized as a follow-up. */
-void xe_display_pm_runtime_suspend(struct xe_device *xe)
+static void xe_display_enable_d3cold(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- if (xe->d3cold.allowed)
- xe_display_pm_suspend(xe, true);
+ /*
+ * We do a lot of poking in a lot of registers, make sure they work
+ * properly.
+ */
+ intel_power_domains_disable(display);
+
+ xe_display_flush_cleanup_work(xe);
+
+ intel_opregion_suspend(display, PCI_D3cold);
+
+ intel_dmc_suspend(display);
+
+ if (has_display(xe))
+ intel_hpd_poll_enable(display);
+}
+
+static void xe_display_disable_d3cold(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
- intel_hpd_poll_enable(xe);
+ intel_dmc_resume(display);
+
+ if (has_display(xe))
+ drm_mode_config_reset(&xe->drm);
+
+ intel_display_driver_init_hw(display);
+
+ intel_hpd_init(display);
+
+ if (has_display(xe))
+ intel_hpd_poll_disable(display);
+
+ intel_opregion_resume(display);
+
+ intel_power_domains_enable(display);
}
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
+void xe_display_pm_suspend(struct xe_device *xe)
{
struct intel_display *display = &xe->display;
bool s2idle = suspend_to_idle();
+
if (!xe->info.probe_display)
return;
@@ -331,113 +349,201 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
* We do a lot of poking in a lot of registers, make sure they work
* properly.
*/
- intel_power_domains_disable(xe);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
- if (!runtime && has_display(xe)) {
+ intel_power_domains_disable(display);
+ drm_client_dev_suspend(&xe->drm, false);
+
+ if (has_display(xe)) {
drm_kms_helper_poll_disable(&xe->drm);
- intel_display_driver_disable_user_access(xe);
- intel_display_driver_suspend(xe);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
}
xe_display_flush_cleanup_work(xe);
- intel_dp_mst_suspend(xe);
-
- intel_hpd_cancel_work(xe);
+ intel_hpd_cancel_work(display);
- if (!runtime && has_display(xe)) {
- intel_display_driver_suspend_access(xe);
+ if (has_display(xe)) {
+ intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(&xe->display);
}
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
- intel_dmc_suspend(xe);
+ intel_dmc_suspend(display);
}
-void xe_display_pm_suspend_late(struct xe_device *xe)
+void xe_display_pm_shutdown(struct xe_device *xe)
{
- bool s2idle = suspend_to_idle();
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ intel_power_domains_disable(display);
+ drm_client_dev_suspend(&xe->drm, false);
+
+ if (has_display(xe)) {
+ drm_kms_helper_poll_disable(&xe->drm);
+ intel_display_driver_disable_user_access(display);
+ intel_display_driver_suspend(display);
+ }
+
+ xe_display_flush_cleanup_work(xe);
+ intel_dp_mst_suspend(display);
+ intel_hpd_cancel_work(display);
+
+ if (has_display(xe))
+ intel_display_driver_suspend_access(display);
+
+ intel_encoder_suspend_all(display);
+ intel_encoder_shutdown_all(display);
+
+ intel_opregion_suspend(display, PCI_D3cold);
+
+ intel_dmc_suspend(display);
+}
+
+void xe_display_pm_runtime_suspend(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_power_domains_suspend(xe, s2idle);
+ if (xe->d3cold.allowed) {
+ xe_display_enable_d3cold(xe);
+ return;
+ }
- intel_display_power_suspend_late(xe);
+ intel_hpd_poll_enable(display);
}
-void xe_display_pm_runtime_resume(struct xe_device *xe)
+void xe_display_pm_suspend_late(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+ bool s2idle = suspend_to_idle();
+
if (!xe->info.probe_display)
return;
- intel_hpd_poll_disable(xe);
+ intel_display_power_suspend_late(display, s2idle);
+}
+
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
if (xe->d3cold.allowed)
- xe_display_pm_resume(xe, true);
+ xe_display_pm_suspend_late(xe);
+
+ /*
+ * If xe_display_pm_suspend_late() is not called, it is likely
+ * that we will be on dynamic DC states with DMC wakelock enabled. We
+ * need to flush the release work in that case.
+ */
+ intel_dmc_wl_flush_release_work(display);
}
-void xe_display_pm_resume_early(struct xe_device *xe)
+void xe_display_pm_shutdown_late(struct xe_device *xe)
{
+ struct intel_display *display = &xe->display;
+
if (!xe->info.probe_display)
return;
- intel_display_power_resume_early(xe);
+ /*
+ * The only requirement is to reboot with display DC states disabled,
+ * for now leaving all display power wells in the INIT power domain
+ * enabled.
+ */
+ intel_power_domains_driver_remove(display);
+}
+
+void xe_display_pm_resume_early(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
- intel_power_domains_resume(xe);
+ intel_display_power_resume_early(display);
}
-void xe_display_pm_resume(struct xe_device *xe, bool runtime)
+void xe_display_pm_resume(struct xe_device *xe)
{
struct intel_display *display = &xe->display;
if (!xe->info.probe_display)
return;
- intel_dmc_resume(xe);
+ intel_dmc_resume(display);
if (has_display(xe))
drm_mode_config_reset(&xe->drm);
- intel_display_driver_init_hw(xe);
- intel_hpd_init(xe);
+ intel_display_driver_init_hw(display);
- if (!runtime && has_display(xe))
- intel_display_driver_resume_access(xe);
+ if (has_display(xe))
+ intel_display_driver_resume_access(display);
+
+ intel_hpd_init(display);
- /* MST sideband requires HPD interrupts enabled */
- intel_dp_mst_resume(xe);
- if (!runtime && has_display(xe)) {
- intel_display_driver_resume(xe);
+ if (has_display(xe)) {
+ intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
- intel_display_driver_enable_user_access(xe);
- intel_hpd_poll_disable(xe);
+ intel_display_driver_enable_user_access(display);
}
+ if (has_display(xe))
+ intel_hpd_poll_disable(display);
+
intel_opregion_resume(display);
- intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false);
+ drm_client_dev_resume(&xe->drm, false);
- intel_power_domains_enable(xe);
+ intel_power_domains_enable(display);
}
+void xe_display_pm_runtime_resume(struct xe_device *xe)
+{
+ struct intel_display *display = &xe->display;
+
+ if (!xe->info.probe_display)
+ return;
+
+ if (xe->d3cold.allowed) {
+ xe_display_disable_d3cold(xe);
+ return;
+ }
+
+ intel_hpd_init(display);
+ intel_hpd_poll_disable(display);
+ skl_watermark_ipc_update(display);
+}
+
+
static void display_device_remove(struct drm_device *dev, void *arg)
{
- struct xe_device *xe = arg;
+ struct intel_display *display = arg;
- intel_display_device_remove(xe);
+ intel_display_device_remove(display);
}
int xe_display_probe(struct xe_device *xe)
{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct intel_display *display;
int err;
if (!xe->info.probe_display)
goto no_display;
- intel_display_device_probe(xe);
+ display = intel_display_device_probe(pdev);
- err = drmm_add_action_or_reset(&xe->drm, display_device_remove, xe);
+ err = drmm_add_action_or_reset(&xe->drm, display_device_remove, display);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index 53d727fd792b..46e14f8dee28 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -14,17 +14,13 @@ struct drm_driver;
bool xe_display_driver_probe_defer(struct pci_dev *pdev);
void xe_display_driver_set_hooks(struct drm_driver *driver);
-void xe_display_driver_remove(struct xe_device *xe);
int xe_display_create(struct xe_device *xe);
int xe_display_probe(struct xe_device *xe);
-int xe_display_init_nommio(struct xe_device *xe);
-int xe_display_init_noirq(struct xe_device *xe);
-int xe_display_init_noaccel(struct xe_device *xe);
+int xe_display_init_early(struct xe_device *xe);
int xe_display_init(struct xe_device *xe);
-void xe_display_fini(struct xe_device *xe);
void xe_display_register(struct xe_device *xe);
void xe_display_unregister(struct xe_device *xe);
@@ -34,11 +30,14 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
-void xe_display_pm_suspend(struct xe_device *xe, bool runtime);
+void xe_display_pm_suspend(struct xe_device *xe);
+void xe_display_pm_shutdown(struct xe_device *xe);
void xe_display_pm_suspend_late(struct xe_device *xe);
+void xe_display_pm_shutdown_late(struct xe_device *xe);
void xe_display_pm_resume_early(struct xe_device *xe);
-void xe_display_pm_resume(struct xe_device *xe, bool runtime);
+void xe_display_pm_resume(struct xe_device *xe);
void xe_display_pm_runtime_suspend(struct xe_device *xe);
+void xe_display_pm_runtime_suspend_late(struct xe_device *xe);
void xe_display_pm_runtime_resume(struct xe_device *xe);
#else
@@ -51,11 +50,8 @@ static inline int xe_display_create(struct xe_device *xe) { return 0; }
static inline int xe_display_probe(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_nommio(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_noirq(struct xe_device *xe) { return 0; }
-static inline int xe_display_init_noaccel(struct xe_device *xe) { return 0; }
+static inline int xe_display_init_early(struct xe_device *xe) { return 0; }
static inline int xe_display_init(struct xe_device *xe) { return 0; }
-static inline void xe_display_fini(struct xe_device *xe) {}
static inline void xe_display_register(struct xe_device *xe) {}
static inline void xe_display_unregister(struct xe_device *xe) {}
@@ -65,11 +61,14 @@ static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir)
static inline void xe_display_irq_reset(struct xe_device *xe) {}
static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
-static inline void xe_display_pm_suspend(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_shutdown(struct xe_device *xe) {}
static inline void xe_display_pm_suspend_late(struct xe_device *xe) {}
+static inline void xe_display_pm_shutdown_late(struct xe_device *xe) {}
static inline void xe_display_pm_resume_early(struct xe_device *xe) {}
-static inline void xe_display_pm_resume(struct xe_device *xe, bool runtime) {}
+static inline void xe_display_pm_resume(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_suspend(struct xe_device *xe) {}
+static inline void xe_display_pm_runtime_suspend_late(struct xe_device *xe) {}
static inline void xe_display_pm_runtime_resume(struct xe_device *xe) {}
#endif /* CONFIG_DRM_XE_DISPLAY */
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
new file mode 100644
index 000000000000..1955153aadba
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "intel_display_rpm.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+
+static struct xe_device *display_to_xe(struct intel_display *display)
+{
+ return container_of(display, struct xe_device, display);
+}
+
+struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+{
+ return intel_display_rpm_get(display);
+}
+
+void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ intel_display_rpm_put(display, wakeref);
+}
+
+struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+{
+ return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
+{
+ return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
+}
+
+struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
+{
+ xe_pm_runtime_get_noresume(display_to_xe(display));
+
+ return INTEL_WAKEREF_DEF;
+}
+
+void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+{
+ if (wakeref)
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+void intel_display_rpm_put_unchecked(struct intel_display *display)
+{
+ xe_pm_runtime_put(display_to_xe(display));
+}
+
+bool intel_display_rpm_suspended(struct intel_display *display)
+{
+ struct xe_device *xe = display_to_xe(display);
+
+ return pm_runtime_suspended(xe->drm.dev);
+}
+
+void assert_display_rpm_held(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_block(struct intel_display *display)
+{
+ /* FIXME */
+}
+
+void intel_display_rpm_assert_unblock(struct intel_display *display)
+{
+ /* FIXME */
+}
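
xe_display_rpm.c maps the display runtime-PM hooks onto xe_pm: a successful get returns the INTEL_WAKEREF_DEF sentinel (a non-NULL ref_tracker pointer, per the intel_wakeref.h change above), a failed get returns NULL, and put only drops the reference when the caller actually holds one. A standalone sketch of that convention, with a plain counter standing in for the runtime-PM usage count (names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct fake_rpm { int usage; bool wake_ok; };

static const char WAKEREF_DEF;	/* address used as an opaque cookie */

static const void *fake_rpm_get(struct fake_rpm *rpm)
{
	if (!rpm->wake_ok)
		return NULL;		/* resume failed, no reference taken */
	rpm->usage++;
	return &WAKEREF_DEF;
}

static void fake_rpm_put(struct fake_rpm *rpm, const void *wakeref)
{
	if (wakeref)			/* only drop what we really hold */
		rpm->usage--;
}

int main(void)
{
	struct fake_rpm rpm = { .wake_ok = true };
	const void *ref = fake_rpm_get(&rpm);

	/* ... hardware access would go here ... */
	fake_rpm_put(&rpm, ref);
	printf("usage after put: %d\n", rpm.usage);
	return 0;
}
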
diff --git a/drivers/gpu/drm/xe/display/xe_display_rps.c b/drivers/gpu/drm/xe/display/xe_display_rps.c
deleted file mode 100644
index ab21c581c192..000000000000
--- a/drivers/gpu/drm/xe/display/xe_display_rps.c
+++ /dev/null
@@ -1,17 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_display_rps.h"
-
-void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
-}
-
-void intel_display_rps_mark_interactive(struct drm_i915_private *i915,
- struct intel_atomic_state *state,
- bool interactive)
-{
-}
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68e3d1959ad6..2933ca97d673 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -10,7 +10,9 @@
#include <generated/xe_wa_oob.h>
-bool intel_display_needs_wa_16023588340(struct drm_i915_private *i915)
+bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
- return XE_WA(xe_root_mmio_gt(i915), 16023588340);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ return XE_WA(xe_root_mmio_gt(xe), 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index f99d901a3214..9f941fc2e36b 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
- struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
- xe_device_l2_flush(xe);
}
u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
@@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
- struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
-
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
- xe_device_l2_flush(xe);
}
bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
@@ -48,11 +42,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
if (!vma)
return false;
+ /* Set scanout flag for WC mapping */
obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
NULL, PAGE_ALIGN(size),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT);
+ XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
if (IS_ERR(obj)) {
kfree(vma);
return false;
@@ -73,5 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
- /* TODO: add xe specific flush_map() for dsb buffer object. */
+ struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
+ /*
+ * The memory barrier here is to ensure coherency of DSB vs MMIO,
+ * both for weak ordering archs and discrete cards.
+ */
+ xe_device_wmb(xe);
+ xe_device_l2_flush(xe);
}
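
The DSB buffer change above moves the cache maintenance out of the per-write helpers and into intel_dsb_buffer_flush_map(): writes become plain iosys_map stores, and a single write barrier plus L2 flush is issued once the buffer is complete. A trivial standalone sketch of that batching idea, where the flush is just a counter rather than the real xe_device_l2_flush():

#include <stdint.h>
#include <stdio.h>

static uint32_t dsb_buf[64];
static int flushes;

static void dsb_write(uint32_t idx, uint32_t val)
{
	dsb_buf[idx] = val;	/* no per-write flush any more */
}

static void dsb_flush_map(void)
{
	flushes++;		/* one barrier + cache flush for the batch */
}

int main(void)
{
	for (uint32_t i = 0; i < 16; i++)
		dsb_write(i, i * 4);
	dsb_flush_map();

	printf("last write 0x%x, flushes issued: %d\n", dsb_buf[15], flushes);
	return 0;
}
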
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index b58fc4ba2aac..55259969480b 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -5,10 +5,12 @@
#include <drm/ttm/ttm_bo.h>
+#include "i915_vma.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
@@ -79,12 +81,14 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
- struct i915_vma *vma)
+ struct i915_vma *vma,
+ unsigned int alignment)
{
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_tile *tile0 = xe_device_get_root_tile(xe);
struct xe_ggtt *ggtt = tile0->mem.ggtt;
- struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt;
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+ struct xe_bo *bo = gem_to_xe_bo(obj), *dpt;
u32 dpt_size, size = bo->ttm.base.size;
if (view->type == I915_GTT_VIEW_NORMAL)
@@ -98,23 +102,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_PAGE_SIZE);
if (IS_DGFX(xe))
- dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM0 |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE);
+ dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment);
else
- dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE);
+ dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment);
if (IS_ERR(dpt))
- dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE);
+ dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -153,7 +163,10 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
}
vma->dpt = dpt;
- vma->node = dpt->ggtt_node;
+ vma->node = dpt->ggtt_node[tile0->id];
+
+ /* Ensure DPT writes are flushed */
+ xe_device_l2_flush(xe);
return 0;
}
@@ -183,9 +196,11 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
const struct i915_gtt_view *view,
- struct i915_vma *vma)
+ struct i915_vma *vma,
+ unsigned int alignment)
{
- struct xe_bo *bo = intel_fb_obj(&fb->base);
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+ struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(fb->base.dev);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 align;
@@ -203,8 +218,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node;
+ if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[ggtt->tile->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
u32 x, size = bo->ttm.base.size;
@@ -264,17 +279,20 @@ out:
}
static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
- const struct i915_gtt_view *view)
+ const struct i915_gtt_view *view,
+ unsigned int alignment)
{
struct drm_device *dev = fb->base.dev;
struct xe_device *xe = to_xe_device(dev);
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
- struct xe_bo *bo = intel_fb_obj(&fb->base);
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+ struct xe_bo *bo = gem_to_xe_bo(obj);
int ret;
if (!vma)
return ERR_PTR(-ENODEV);
+ refcount_set(&vma->ref, 1);
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
@@ -312,14 +330,12 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
vma->bo = bo;
if (intel_fb_uses_dpt(&fb->base))
- ret = __xe_pin_fb_vma_dpt(fb, view, vma);
+ ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment);
else
- ret = __xe_pin_fb_vma_ggtt(fb, view, vma);
+ ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment);
if (ret)
goto err_unpin;
- /* Ensure DPT writes are flushed */
- xe_device_l2_flush(xe);
return vma;
err_unpin:
@@ -333,10 +349,15 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
+ u8 tile_id = vma->node->ggtt->tile->id;
+
+ if (!refcount_dec_and_test(&vma->ref))
+ return;
+
if (vma->dpt)
xe_bo_unpin_map_no_vm(vma->dpt);
- else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) ||
- vma->bo->ggtt_node->base.start != vma->node->base.start)
+ else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) ||
+ vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start)
xe_ggtt_node_remove(vma->node, false);
ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
@@ -350,12 +371,13 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned int phys_alignment,
+ unsigned int vtd_guard,
bool uses_fence,
unsigned long *out_flags)
{
*out_flags = 0;
- return __xe_pin_fb_vma(to_intel_framebuffer(fb), view);
+ return __xe_pin_fb_vma(to_intel_framebuffer(fb), view, phys_alignment);
}
void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
@@ -363,20 +385,58 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags)
__xe_unpin_fb_vma(vma);
}
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+static bool reuse_vma(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state)
+{
+ struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+ struct xe_device *xe = to_xe_device(fb->base.dev);
+ struct i915_vma *vma;
+
+ if (old_plane_state->hw.fb == new_plane_state->hw.fb &&
+ !memcmp(&old_plane_state->view.gtt,
+ &new_plane_state->view.gtt,
+ sizeof(new_plane_state->view.gtt))) {
+ vma = old_plane_state->ggtt_vma;
+ goto found;
+ }
+
+ if (fb == intel_fbdev_framebuffer(xe->display.fbdev.fbdev)) {
+ vma = intel_fbdev_vma_pointer(xe->display.fbdev.fbdev);
+ if (vma)
+ goto found;
+ }
+
+ return false;
+
+found:
+ refcount_inc(&vma->ref);
+ new_plane_state->ggtt_vma = vma;
+ return true;
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
+ const struct intel_plane_state *old_plane_state)
{
- struct drm_framebuffer *fb = plane_state->hw.fb;
- struct xe_bo *bo = intel_fb_obj(fb);
+ struct drm_framebuffer *fb = new_plane_state->hw.fb;
+ struct drm_gem_object *obj = intel_fb_bo(fb);
+ struct xe_bo *bo = gem_to_xe_bo(obj);
struct i915_vma *vma;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
+ unsigned int alignment = plane->min_alignment(plane, fb, 0);
+
+ if (reuse_vma(new_plane_state, old_plane_state))
+ return 0;
/* We reject creating !SCANOUT fb's, so this is weird.. */
drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
- vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
+ vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment);
+
if (IS_ERR(vma))
return PTR_ERR(vma);
- plane_state->ggtt_vma = vma;
+ new_plane_state->ggtt_vma = vma;
return 0;
}
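
The refcounting added above lets a new plane state reuse the vma already pinned for the old plane state (or for the fbdev framebuffer) instead of pinning the buffer again; __xe_unpin_fb_vma() only tears the mapping down once the last user drops its reference. A minimal sketch of the same reuse-or-create pattern, with hypothetical names and the actual pin/unpin work elided (needs <linux/refcount.h> and <linux/slab.h>):

/* Illustrative sketch only; not the driver's API. */
struct fb_vma {
	refcount_t ref;		/* 1 on creation, +1 per additional user */
	/* ... mapping state ... */
};

static struct fb_vma *fb_vma_get_or_create(struct fb_vma *reusable)
{
	struct fb_vma *vma;

	if (reusable) {
		refcount_inc(&reusable->ref);	/* share the existing pin */
		return reusable;
	}

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return NULL;
	refcount_set(&vma->ref, 1);		/* first user */
	/* ... pin / map here ... */
	return vma;
}

static void fb_vma_put(struct fb_vma *vma)
{
	if (!refcount_dec_and_test(&vma->ref))
		return;				/* still shared by another plane state */
	/* ... unpin / unmap here ... */
	kfree(vma);
}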
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 6619a40aed15..b35a6f201d4a 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -9,7 +9,6 @@
#include "abi/gsc_command_header_abi.h"
#include "intel_hdcp_gsc.h"
-#include "intel_hdcp_gsc_message.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
@@ -22,7 +21,8 @@
#define HECI_MEADDRESS_HDCP 18
-struct intel_hdcp_gsc_message {
+struct intel_hdcp_gsc_context {
+ struct xe_device *xe;
struct xe_bo *hdcp_bo;
u64 hdcp_cmd_in;
u64 hdcp_cmd_out;
@@ -30,26 +30,24 @@ struct intel_hdcp_gsc_message {
#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
-bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
-{
- return DISPLAY_VER(xe) >= 14;
-}
-
-bool intel_hdcp_gsc_check_status(struct xe_device *xe)
+bool intel_hdcp_gsc_check_status(struct drm_device *drm)
{
+ struct xe_device *xe = to_xe_device(drm);
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_gt *gt = tile->media_gt;
struct xe_gsc *gsc = &gt->uc.gsc;
bool ret = true;
+ unsigned int fw_ref;
- if (!gsc && !xe_uc_fw_is_enabled(&gsc->fw)) {
+ if (!gsc || !xe_uc_fw_is_enabled(&gsc->fw)) {
drm_dbg_kms(&xe->drm,
"GSC Components not ready for HDCP2.x\n");
return false;
}
xe_pm_runtime_get(xe);
- if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (!fw_ref) {
drm_dbg_kms(&xe->drm,
"failed to get forcewake to check proxy status\n");
ret = false;
@@ -59,7 +57,7 @@ bool intel_hdcp_gsc_check_status(struct xe_device *xe)
if (!xe_gsc_proxy_init_done(gsc))
ret = false;
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
out:
xe_pm_runtime_put(xe);
return ret;
@@ -67,7 +65,7 @@ out:
/* This function helps allocate memory for the command that we will send to gsc cs */
static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
- struct intel_hdcp_gsc_message *hdcp_message)
+ struct intel_hdcp_gsc_context *gsc_context)
{
struct xe_bo *bo = NULL;
u64 cmd_in, cmd_out;
@@ -89,104 +87,60 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
cmd_out = cmd_in + PAGE_SIZE;
xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
- hdcp_message->hdcp_bo = bo;
- hdcp_message->hdcp_cmd_in = cmd_in;
- hdcp_message->hdcp_cmd_out = cmd_out;
+ gsc_context->hdcp_bo = bo;
+ gsc_context->hdcp_cmd_in = cmd_in;
+ gsc_context->hdcp_cmd_out = cmd_out;
+ gsc_context->xe = xe;
+
out:
return ret;
}
-static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
+struct intel_hdcp_gsc_context *intel_hdcp_gsc_context_alloc(struct drm_device *drm)
{
- struct intel_hdcp_gsc_message *hdcp_message;
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_hdcp_gsc_context *gsc_context;
int ret;
- hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
-
- if (!hdcp_message)
- return -ENOMEM;
+ gsc_context = kzalloc(sizeof(*gsc_context), GFP_KERNEL);
+ if (!gsc_context)
+ return ERR_PTR(-ENOMEM);
/*
* NOTE: No need to lock the comp mutex here as it is already
* going to be taken before this function is called
*/
- ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
+ ret = intel_hdcp_gsc_initialize_message(xe, gsc_context);
if (ret) {
- drm_err(&xe->drm, "Could not initialize hdcp_message\n");
- kfree(hdcp_message);
- return ret;
+ drm_err(&xe->drm, "Could not initialize gsc_context\n");
+ kfree(gsc_context);
+ gsc_context = ERR_PTR(ret);
}
- xe->display.hdcp.hdcp_message = hdcp_message;
- return ret;
-}
-
-static const struct i915_hdcp_ops gsc_hdcp_ops = {
- .initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
- .verify_receiver_cert_prepare_km =
- intel_hdcp_gsc_verify_receiver_cert_prepare_km,
- .verify_hprime = intel_hdcp_gsc_verify_hprime,
- .store_pairing_info = intel_hdcp_gsc_store_pairing_info,
- .initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
- .verify_lprime = intel_hdcp_gsc_verify_lprime,
- .get_session_key = intel_hdcp_gsc_get_session_key,
- .repeater_check_flow_prepare_ack =
- intel_hdcp_gsc_repeater_check_flow_prepare_ack,
- .verify_mprime = intel_hdcp_gsc_verify_mprime,
- .enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
- .close_hdcp_session = intel_hdcp_gsc_close_session,
-};
-
-int intel_hdcp_gsc_init(struct xe_device *xe)
-{
- struct i915_hdcp_arbiter *data;
- int ret;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- mutex_lock(&xe->display.hdcp.hdcp_mutex);
- xe->display.hdcp.arbiter = data;
- xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
- xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
- ret = intel_hdcp_gsc_hdcp2_init(xe);
- if (ret)
- kfree(data);
-
- mutex_unlock(&xe->display.hdcp.hdcp_mutex);
-
- return ret;
+ return gsc_context;
}
-void intel_hdcp_gsc_fini(struct xe_device *xe)
+void intel_hdcp_gsc_context_free(struct intel_hdcp_gsc_context *gsc_context)
{
- struct intel_hdcp_gsc_message *hdcp_message =
- xe->display.hdcp.hdcp_message;
- struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;
-
- if (hdcp_message) {
- xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
- kfree(hdcp_message);
- xe->display.hdcp.hdcp_message = NULL;
- }
+ if (!gsc_context)
+ return;
- kfree(arb);
- xe->display.hdcp.arbiter = NULL;
+ xe_bo_unpin_map_no_vm(gsc_context->hdcp_bo);
+ kfree(gsc_context);
}
static int xe_gsc_send_sync(struct xe_device *xe,
- struct intel_hdcp_gsc_message *hdcp_message,
+ struct intel_hdcp_gsc_context *gsc_context,
u32 msg_size_in, u32 msg_size_out,
u32 addr_out_off)
{
- struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
- struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+ struct xe_gt *gt = gsc_context->hdcp_bo->tile->media_gt;
+ struct iosys_map *map = &gsc_context->hdcp_bo->vmap;
struct xe_gsc *gsc = &gt->uc.gsc;
int ret;
- ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
- hdcp_message->hdcp_cmd_out, msg_size_out);
+ ret = xe_gsc_pkt_submit_kernel(gsc, gsc_context->hdcp_cmd_in, msg_size_in,
+ gsc_context->hdcp_cmd_out, msg_size_out);
if (ret) {
drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
return ret;
@@ -201,12 +155,12 @@ static int xe_gsc_send_sync(struct xe_device *xe,
return ret;
}
-ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
- size_t msg_in_len, u8 *msg_out,
- size_t msg_out_len)
+ssize_t intel_hdcp_gsc_msg_send(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len)
{
+ struct xe_device *xe = gsc_context->xe;
const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
- struct intel_hdcp_gsc_message *hdcp_message;
u64 host_session_id;
u32 msg_size_in, msg_size_out;
u32 addr_out_off, addr_in_wr_off = 0;
@@ -219,15 +173,14 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
- hdcp_message = xe->display.hdcp.hdcp_message;
addr_out_off = PAGE_SIZE;
host_session_id = xe_gsc_create_host_session_id();
xe_pm_runtime_get_noresume(xe);
- addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+ addr_in_wr_off = xe_gsc_emit_header(xe, &gsc_context->hdcp_bo->vmap,
addr_in_wr_off, HECI_MEADDRESS_HDCP,
host_session_id, msg_in_len);
- xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+ xe_map_memcpy_to(xe, &gsc_context->hdcp_bo->vmap, addr_in_wr_off,
msg_in, msg_in_len);
/*
* Keep sending request in case the pending bit is set no need to add
@@ -236,7 +189,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
* 20 times each message 50 ms apart
*/
do {
- ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+ ret = xe_gsc_send_sync(xe, gsc_context, msg_size_in, msg_size_out,
addr_out_off);
/* Only try again if gsc says so */
@@ -250,7 +203,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
if (ret)
goto out;
- xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+ xe_map_memcpy_from(xe, msg_out, &gsc_context->hdcp_bo->vmap,
addr_out_off + HDCP_GSC_HEADER_SIZE,
msg_out_len);
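
With the rework above, the HDCP code no longer reaches into xe->display.hdcp: callers allocate an opaque intel_hdcp_gsc_context, hand it to intel_hdcp_gsc_msg_send(), and free it when done. A rough usage sketch — the caller and its payloads are hypothetical and error handling is trimmed:

/* Sketch of the reworked interface; not an actual call site. */
static int example_hdcp_roundtrip(struct drm_device *drm,
				  void *msg_in, size_t in_len,
				  void *msg_out, size_t out_len)
{
	struct intel_hdcp_gsc_context *ctx;
	ssize_t ret;

	ctx = intel_hdcp_gsc_context_alloc(drm);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = intel_hdcp_gsc_msg_send(ctx, msg_in, in_len, msg_out, out_len);

	intel_hdcp_gsc_context_free(ctx);
	return ret < 0 ? ret : 0;
}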
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index a50ab9eae40a..6502b8274173 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -8,7 +8,9 @@
#include "regs/xe_gtt_defs.h"
#include "xe_ggtt.h"
+#include "xe_mmio.h"
+#include "i915_reg.h"
#include "intel_atomic_plane.h"
#include "intel_crtc.h"
#include "intel_display.h"
@@ -22,6 +24,21 @@
#include <generated/xe_wa_oob.h>
+void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
+{
+ /* Early xe has no irq */
+ struct xe_device *xe = to_xe_device(crtc->base.dev);
+ struct xe_reg pipe_frmtmstmp = XE_REG(i915_mmio_reg_offset(PIPE_FRMTMSTMP(crtc->pipe)));
+ u32 timestamp;
+ int ret;
+
+ timestamp = xe_mmio_read32(xe_root_tile_mmio(xe), pipe_frmtmstmp);
+
+ ret = xe_mmio_wait32_not(xe_root_tile_mmio(xe), pipe_frmtmstmp, ~0U, timestamp, 40000U, &timestamp, false);
+ if (ret < 0)
+ drm_warn(&xe->drm, "waiting for early vblank failed with %i\n", ret);
+}
+
static bool
intel_reuse_initial_plane_obj(struct intel_crtc *this,
const struct intel_initial_plane_config plane_configs[],
@@ -66,7 +83,7 @@ initial_plane_bo(struct xe_device *xe,
if (plane_config->size == 0)
return NULL;
- flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
+ flags = XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
@@ -170,7 +187,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
if (intel_framebuffer_init(to_intel_framebuffer(fb),
- bo, &mode_cmd)) {
+ &bo->ttm.base, &mode_cmd)) {
drm_dbg_kms(&xe->drm, "intel fb init failed\n");
goto err_bo;
}
@@ -194,8 +211,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -217,7 +232,7 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
plane_state->uapi.rotation, &plane_state->view);
vma = intel_fb_pin_to_ggtt(fb, &plane_state->view.gtt,
- 0, 0, false, &plane_state->flags);
+ 0, 0, 0, false, &plane_state->flags);
if (IS_ERR(vma))
goto nofb;
@@ -241,14 +256,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
plane_config->vma = vma;
-
- /*
- * Flip to the newly created mapping ASAP, so we can re-use the
- * first part of GGTT for WOPCM, prevent flickering, and prevent
- * the lookup of sysmem scratch pages.
- */
- plane->check_plane(crtc_state, plane_state);
- plane->async_flip(plane, crtc_state, plane_state, true);
return;
nofb:
@@ -275,12 +282,12 @@ static void plane_config_fini(struct intel_initial_plane_config *plane_config)
}
}
-void intel_initial_plane_config(struct drm_i915_private *i915)
+void intel_initial_plane_config(struct intel_display *display)
{
struct intel_initial_plane_config plane_configs[I915_MAX_PIPES] = {};
struct intel_crtc *crtc;
- for_each_intel_crtc(&i915->drm, crtc) {
+ for_each_intel_crtc(display->drm, crtc) {
struct intel_initial_plane_config *plane_config =
&plane_configs[crtc->pipe];
@@ -294,7 +301,7 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
* can even allow for smooth boot transitions if the BIOS
* fb is large enough for the active pipe configuration.
*/
- i915->display.funcs.display->get_initial_plane_config(crtc, plane_config);
+ display->funcs.display->get_initial_plane_config(crtc, plane_config);
/*
* If the fb is shared between multiple heads, we'll
@@ -302,8 +309,8 @@ void intel_initial_plane_config(struct drm_i915_private *i915)
*/
intel_find_initial_plane_obj(crtc, plane_configs);
- if (i915->display.funcs.display->fixup_initial_plane_config(crtc, plane_config))
- intel_crtc_wait_for_next_vblank(crtc);
+ if (display->funcs.display->fixup_initial_plane_config(crtc, plane_config))
+ intel_plane_initial_vblank_wait(crtc);
plane_config_fini(plane_config);
}
diff --git a/drivers/gpu/drm/xe/display/xe_tdf.c b/drivers/gpu/drm/xe/display/xe_tdf.c
index 2c0d4e144e09..2a7fccbeb1d5 100644
--- a/drivers/gpu/drm/xe/display/xe_tdf.c
+++ b/drivers/gpu/drm/xe/display/xe_tdf.c
@@ -7,7 +7,9 @@
#include "intel_display_types.h"
#include "intel_tdf.h"
-void intel_td_flush(struct drm_i915_private *i915)
+void intel_td_flush(struct intel_display *display)
{
- xe_device_td_flush(i915);
+ struct xe_device *xe = to_xe_device(display->drm);
+
+ xe_device_td_flush(xe);
}
diff --git a/drivers/gpu/drm/xe/instructions/xe_alu_commands.h b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
new file mode 100644
index 000000000000..2987b10d3e16
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_ALU_COMMANDS_H_
+#define _XE_ALU_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+/* Instruction Opcodes */
+#define CS_ALU_OPCODE_NOOP 0x000
+#define CS_ALU_OPCODE_FENCE_RD 0x001
+#define CS_ALU_OPCODE_FENCE_WR 0x002
+#define CS_ALU_OPCODE_LOAD 0x080
+#define CS_ALU_OPCODE_LOADINV 0x480
+#define CS_ALU_OPCODE_LOAD0 0x081
+#define CS_ALU_OPCODE_LOAD1 0x481
+#define CS_ALU_OPCODE_LOADIND 0x082
+#define CS_ALU_OPCODE_ADD 0x100
+#define CS_ALU_OPCODE_SUB 0x101
+#define CS_ALU_OPCODE_AND 0x102
+#define CS_ALU_OPCODE_OR 0x103
+#define CS_ALU_OPCODE_XOR 0x104
+#define CS_ALU_OPCODE_SHL 0x105
+#define CS_ALU_OPCODE_SHR 0x106
+#define CS_ALU_OPCODE_SAR 0x107
+#define CS_ALU_OPCODE_STORE 0x180
+#define CS_ALU_OPCODE_STOREINV 0x580
+#define CS_ALU_OPCODE_STOREIND 0x181
+
+/* Instruction Operands */
+#define CS_ALU_OPERAND_REG(n) REG_FIELD_PREP(GENMASK(3, 0), (n))
+#define CS_ALU_OPERAND_REG0 0x0
+#define CS_ALU_OPERAND_REG1 0x1
+#define CS_ALU_OPERAND_REG2 0x2
+#define CS_ALU_OPERAND_REG3 0x3
+#define CS_ALU_OPERAND_REG4 0x4
+#define CS_ALU_OPERAND_REG5 0x5
+#define CS_ALU_OPERAND_REG6 0x6
+#define CS_ALU_OPERAND_REG7 0x7
+#define CS_ALU_OPERAND_REG8 0x8
+#define CS_ALU_OPERAND_REG9 0x9
+#define CS_ALU_OPERAND_REG10 0xa
+#define CS_ALU_OPERAND_REG11 0xb
+#define CS_ALU_OPERAND_REG12 0xc
+#define CS_ALU_OPERAND_REG13 0xd
+#define CS_ALU_OPERAND_REG14 0xe
+#define CS_ALU_OPERAND_REG15 0xf
+#define CS_ALU_OPERAND_SRCA 0x20
+#define CS_ALU_OPERAND_SRCB 0x21
+#define CS_ALU_OPERAND_ACCU 0x31
+#define CS_ALU_OPERAND_ZF 0x32
+#define CS_ALU_OPERAND_CF 0x33
+#define CS_ALU_OPERAND_NA 0 /* N/A operand */
+
+/* Command Streamer ALU Instructions */
+#define CS_ALU_INSTR(opcode, op1, op2) (REG_FIELD_PREP(GENMASK(31, 20), (opcode)) | \
+ REG_FIELD_PREP(GENMASK(19, 10), (op1)) | \
+ REG_FIELD_PREP(GENMASK(9, 0), (op2)))
+
+#define __CS_ALU_INSTR(opcode, op1, op2) CS_ALU_INSTR(CS_ALU_OPCODE_##opcode, \
+ CS_ALU_OPERAND_##op1, \
+ CS_ALU_OPERAND_##op2)
+
+#define CS_ALU_INSTR_NOOP __CS_ALU_INSTR(NOOP, NA, NA)
+#define CS_ALU_INSTR_LOAD(op1, op2) __CS_ALU_INSTR(LOAD, op1, op2)
+#define CS_ALU_INSTR_LOADINV(op1, op2) __CS_ALU_INSTR(LOADINV, op1, op2)
+#define CS_ALU_INSTR_LOAD0(op1) __CS_ALU_INSTR(LOAD0, op1, NA)
+#define CS_ALU_INSTR_LOAD1(op1) __CS_ALU_INSTR(LOAD1, op1, NA)
+#define CS_ALU_INSTR_ADD __CS_ALU_INSTR(ADD, NA, NA)
+#define CS_ALU_INSTR_SUB __CS_ALU_INSTR(SUB, NA, NA)
+#define CS_ALU_INSTR_AND __CS_ALU_INSTR(AND, NA, NA)
+#define CS_ALU_INSTR_OR __CS_ALU_INSTR(OR, NA, NA)
+#define CS_ALU_INSTR_XOR __CS_ALU_INSTR(XOR, NA, NA)
+#define CS_ALU_INSTR_STORE(op1, op2) __CS_ALU_INSTR(STORE, op1, op2)
+#define CS_ALU_INSTR_STOREINV(op1, op2) __CS_ALU_INSTR(STOREINV, op1, op2)
+
+#endif
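
Each CS_ALU_INSTR_* helper above packs a 12-bit opcode plus two 10-bit operands into one dword of an ALU program that the command streamer consumes via MI_MATH. A sketch, not taken from the driver, of a program that adds two general-purpose registers:

/* Illustrative only: GPR2 = GPR0 + GPR1, encoded with the helpers above. */
static const u32 alu_add_gprs[] = {
	CS_ALU_INSTR_LOAD(SRCA, REG0),	/* SRCA <- GPR0 */
	CS_ALU_INSTR_LOAD(SRCB, REG1),	/* SRCB <- GPR1 */
	CS_ALU_INSTR_ADD,		/* ACCU <- SRCA + SRCB */
	CS_ALU_INSTR_STORE(REG2, ACCU),	/* GPR2 <- ACCU */
};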
diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
index 31d28a67ef6a..457881af8af9 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h
@@ -137,6 +137,7 @@
#define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81)
#define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82)
#define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83)
+#define CMD_3DSTATE_COARSE_PIXEL GFXPIPE_3D_CMD(0x0, 0x89)
#define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0)
#define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4)
diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
index a255946b6f77..8cfcd3360896 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
@@ -41,6 +41,7 @@
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|((len)-2))
+#define PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE BIT(10) /* gen12 */
#define PIPE_CONTROL0_HDC_PIPELINE_FLUSH BIT(9) /* gen12 */
#define PIPE_CONTROL_COMMAND_CACHE_INVALIDATE (1<<29)
diff --git a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
index fd2ce7ace510..e559969468c4 100644
--- a/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
+++ b/drivers/gpu/drm/xe/instructions/xe_instr_defs.h
@@ -16,6 +16,7 @@
#define XE_INSTR_CMD_TYPE GENMASK(31, 29)
#define XE_INSTR_MI REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x0)
#define XE_INSTR_GSC REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x2)
+#define XE_INSTR_VIDEOPIPE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x3)
#define XE_INSTR_GFXPIPE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x3)
#define XE_INSTR_GFX_STATE REG_FIELD_PREP(XE_INSTR_CMD_TYPE, 0x4)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mfx_commands.h b/drivers/gpu/drm/xe/instructions/xe_mfx_commands.h
new file mode 100644
index 000000000000..3c0c97f78e90
--- /dev/null
+++ b/drivers/gpu/drm/xe/instructions/xe_mfx_commands.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_MFX_COMMANDS_H_
+#define _XE_MFX_COMMANDS_H_
+
+#include "instructions/xe_instr_defs.h"
+
+#define MFX_CMD_SUBTYPE REG_GENMASK(28, 27) /* A.K.A cmd pipe */
+#define MFX_CMD_OPCODE REG_GENMASK(26, 24)
+#define MFX_CMD_SUB_OPCODE REG_GENMASK(23, 16)
+#define MFX_FLAGS_AND_LEN REG_GENMASK(15, 0)
+
+#define XE_MFX_INSTR(subtype, op, sub_op) \
+ (XE_INSTR_VIDEOPIPE | \
+ REG_FIELD_PREP(MFX_CMD_SUBTYPE, subtype) | \
+ REG_FIELD_PREP(MFX_CMD_OPCODE, op) | \
+ REG_FIELD_PREP(MFX_CMD_SUB_OPCODE, sub_op))
+
+#define MFX_WAIT XE_MFX_INSTR(1, 0, 0)
+#define MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG REG_BIT(9)
+#define MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG REG_BIT(8)
+
+#define CRYPTO_KEY_EXCHANGE XE_MFX_INSTR(2, 6, 9)
+
+#endif
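
MFX_WAIT above is a single-dword video-pipe command; the DW0 flags make it additionally wait on MFX/PXP synchronization. A plausible emission helper, shown purely as a sketch (the 'cs' batch cursor is hypothetical):

/* Sketch: emit a video-pipe wait that also synchronizes with PXP. */
static u32 *emit_mfx_pxp_wait(u32 *cs)
{
	*cs++ = MFX_WAIT | MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG |
		MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG;
	return cs;
}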
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index 10ec2920d31b..e3f5e8bb3ebc 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -32,6 +32,7 @@
#define MI_BATCH_BUFFER_END __MI_INSTR(0xA)
#define MI_TOPOLOGY_FILTER __MI_INSTR(0xD)
#define MI_FORCE_WAKEUP __MI_INSTR(0x1D)
+#define MI_MATH(n) (__MI_INSTR(0x1A) | XE_INSTR_NUM_DW((n) + 1))
#define MI_STORE_DATA_IMM __MI_INSTR(0x20)
#define MI_SDI_GGTT REG_BIT(22)
@@ -47,7 +48,12 @@
#define MI_LRI_FORCE_POSTED REG_BIT(12)
#define MI_LRI_LEN(x) (((x) & 0xff) + 1)
+#define MI_STORE_REGISTER_MEM (__MI_INSTR(0x24) | XE_INSTR_NUM_DW(4))
+#define MI_SRM_USE_GGTT REG_BIT(22)
+#define MI_SRM_ADD_CS_OFFSET REG_BIT(19)
+
#define MI_FLUSH_DW __MI_INSTR(0x26)
+#define MI_FLUSH_DW_PROTECTED_MEM_EN REG_BIT(22)
#define MI_FLUSH_DW_STORE_INDEX REG_BIT(21)
#define MI_INVALIDATE_TLB REG_BIT(18)
#define MI_FLUSH_DW_CCS REG_BIT(16)
@@ -60,10 +66,18 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
+#define MI_LRR_DST_CS_MMIO REG_BIT(19)
+#define MI_LRR_SRC_CS_MMIO REG_BIT(18)
+
#define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5))
#define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22)
#define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21)
#define MI_BATCH_BUFFER_START __MI_INSTR(0x31)
+#define MI_SET_APPID __MI_INSTR(0x0e)
+#define MI_SET_APPID_SESSION_ID_MASK REG_GENMASK(6, 0)
+#define MI_SET_APPID_SESSION_ID(x) REG_FIELD_PREP(MI_SET_APPID_SESSION_ID_MASK, x)
+
#endif
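
MI_MATH(n) above sizes the command so that n ALU dwords follow the header, and MI_STORE_REGISTER_MEM is a fixed four-dword command. The sketch below wires the two together; it assumes the usual SRM payload order (register offset, address low, address high), and the parameter names are made up:

/* Sketch only: run an ALU program (e.g. the one after xe_alu_commands.h above),
 * then store a GPR out to memory through the GGTT. */
static u32 *emit_math_and_store(u32 *cs, const u32 *alu, int n_alu,
				u32 gpr_mmio_offset, u64 dest)
{
	int i;

	*cs++ = MI_MATH(n_alu);			/* n_alu ALU dwords follow */
	for (i = 0; i < n_alu; i++)
		*cs++ = alu[i];

	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT;
	*cs++ = gpr_mmio_offset;		/* e.g. CS_GPR_REG(base, 2).addr */
	*cs++ = lower_32_bits(dest);
	*cs++ = upper_32_bits(dest);

	return cs;
}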
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 81b71903675e..7ade41e2b7b3 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -43,6 +43,10 @@
#define XEHPC_BCS8_RING_BASE 0x3ee000
#define GSCCS_RING_BASE 0x11a000
+#define ENGINE_ID(base) XE_REG((base) + 0x8c)
+#define ENGINE_INSTANCE_ID REG_GENMASK(9, 4)
+#define ENGINE_CLASS_ID REG_GENMASK(2, 0)
+
#define RING_TAIL(base) XE_REG((base) + 0x30)
#define TAIL_ADDR REG_GENMASK(20, 3)
@@ -53,7 +57,6 @@
#define RING_CTL(base) XE_REG((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_START_UDW(base) XE_REG((base) + 0x48)
@@ -83,6 +86,8 @@
#define RING_IMR(base) XE_REG((base) + 0xa8)
#define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac)
+#define CS_INT_VEC(base) XE_REG((base) + 0x1b8)
+
#define RING_EIR(base) XE_REG((base) + 0xb0)
#define RING_EMR(base) XE_REG((base) + 0xb4)
#define RING_ESR(base) XE_REG((base) + 0xb8)
@@ -129,7 +134,12 @@
#define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234)
#define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4)
+#define RING_IDLEDLY(base) XE_REG((base) + 0x23c)
+#define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31)
+#define IDLE_DELAY REG_GENMASK(20, 0)
+
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
+#define CTX_CTRL_PXP_ENABLE REG_BIT(10)
#define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
#define CTX_CTRL_RUN_ALONE REG_BIT(7)
#define CTX_CTRL_INDIRECT_RING_STATE_ENABLE REG_BIT(4)
@@ -138,6 +148,7 @@
#define RING_MODE(base) XE_REG((base) + 0x29c)
#define GFX_DISABLE_LEGACY_MODE REG_BIT(3)
+#define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13)
#define RING_TIMESTAMP(base) XE_REG((base) + 0x358)
@@ -147,6 +158,7 @@
#define STOP_RING REG_BIT(8)
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
+#define RING_CTX_TIMESTAMP_UDW(base) XE_REG((base) + 0x3ac)
#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
#define RING_FORCE_TO_NONPRIV(base, i) XE_REG(((base) + 0x4d0) + (i) * 4)
@@ -181,11 +193,16 @@
#define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1)
#define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0)
+#define CS_GPR_DATA(base, n) XE_REG((base) + 0x600 + (n) * 4)
+#define CS_GPR_REG(base, n) CS_GPR_DATA((base), (n) * 2)
+#define CS_GPR_REG_UDW(base, n) CS_GPR_DATA((base), (n) * 2 + 1)
+
#define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08)
#define CG3DDISHRS_CLKGATE_DIS REG_BIT(5)
#define VDBOX_CGCTL3F10(base) XE_REG((base) + 0x3f10)
#define IECPUNIT_CLKGATE_DIS REG_BIT(22)
+#define RAMDFTUNIT_CLKGATE_DIS REG_BIT(9)
#define VDBOX_CGCTL3F18(base) XE_REG((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
diff --git a/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h b/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h
new file mode 100644
index 000000000000..c53f57fdde65
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_eu_stall_regs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_EU_STALL_REGS_H_
+#define _XE_EU_STALL_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define XEHPC_EUSTALL_BASE XE_REG_MCR(0xe520)
+#define XEHPC_EUSTALL_BASE_BUF_ADDR REG_GENMASK(31, 6)
+#define XEHPC_EUSTALL_BASE_XECORE_BUF_SZ REG_GENMASK(5, 3)
+#define XEHPC_EUSTALL_BASE_ENABLE_SAMPLING REG_BIT(1)
+
+#define XEHPC_EUSTALL_BASE_UPPER XE_REG_MCR(0xe524)
+
+#define XEHPC_EUSTALL_REPORT XE_REG_MCR(0xe528, XE_REG_OPTION_MASKED)
+#define XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK REG_GENMASK(15, 2)
+#define XEHPC_EUSTALL_REPORT_OVERFLOW_DROP REG_BIT(1)
+
+#define XEHPC_EUSTALL_REPORT1 XE_REG_MCR(0xe52c, XE_REG_OPTION_MASKED)
+#define XEHPC_EUSTALL_REPORT1_READ_PTR_MASK REG_GENMASK(15, 2)
+
+#define XEHPC_EUSTALL_CTRL XE_REG_MCR(0xe53c, XE_REG_OPTION_MASKED)
+#define EUSTALL_MOCS REG_GENMASK(9, 3)
+#define EUSTALL_SAMPLE_RATE REG_GENMASK(2, 0)
+
+#endif
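
The report registers above carry 14-bit hardware write/read pointers in bits 15:2, so REG_FIELD_GET() is the natural way to pull them out. A minimal sketch; the surrounding read path is hypothetical:

/* Sketch: decode the EU stall buffer write pointer from a raw register read. */
static u32 eu_stall_write_ptr(u32 raw_report)
{
	return REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, raw_report);
}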
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 00ad34ed73a5..5cd5ab8529c5 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -62,7 +62,6 @@
#define LE_SSE_MASK REG_GENMASK(18, 17)
#define LE_SSE(value) REG_FIELD_PREP(LE_SSE_MASK, value)
#define LE_COS_MASK REG_GENMASK(16, 15)
-#define LE_COS(value) REG_FIELD_PREP(LE_COS_MASK)
#define LE_SCF_MASK REG_BIT(14)
#define LE_SCF(value) REG_FIELD_PREP(LE_SCF_MASK, value)
#define LE_PFM_MASK REG_GENMASK(13, 11)
@@ -157,6 +156,7 @@
#define XEHPG_SC_INSTDONE_EXTRA2 XE_REG_MCR(0x7108)
#define COMMON_SLICE_CHICKEN4 XE_REG(0x7300, XE_REG_OPTION_MASKED)
+#define SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE REG_BIT(12)
#define DISABLE_TDC_LOAD_BALANCING_CALC REG_BIT(6)
#define COMMON_SLICE_CHICKEN3 XE_REG(0x7304, XE_REG_OPTION_MASKED)
@@ -221,6 +221,9 @@
#define MIRROR_FUSE1 XE_REG(0x911c)
+#define MIRROR_L3BANK_ENABLE XE_REG(0x9130)
+#define XE3_L3BANK_ENABLE REG_GENMASK(31, 0)
+
#define XELP_EU_ENABLE XE_REG(0x9134) /* "_DISABLE" on Xe_LP */
#define XELP_EU_MASK REG_GENMASK(7, 0)
#define XELP_GT_SLICE_ENABLE XE_REG(0x9138)
@@ -286,6 +289,9 @@
#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16)
#define LTCDD_CLKGATE_DIS REG_BIT(10)
+#define UNSLCGCTL9454 XE_REG(0x9454)
+#define LSCFE_CLKGATE_DIS REG_BIT(4)
+
#define XEHP_SLICE_UNIT_LEVEL_CLKGATE XE_REG_MCR(0x94d4)
#define L3_CR2X_CLKGATE_DIS REG_BIT(17)
#define L3_CLKGATE_DIS REG_BIT(16)
@@ -344,14 +350,26 @@
#define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0)
#define FORCEWAKE_RENDER XE_REG(0xa278)
+
+#define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0)
+#define MEDIA_SLICE3_AWAKE_STATUS REG_BIT(4)
+#define MEDIA_SLICE2_AWAKE_STATUS REG_BIT(3)
+#define MEDIA_SLICE1_AWAKE_STATUS REG_BIT(2)
+#define RENDER_AWAKE_STATUS REG_BIT(1)
+#define MEDIA_SLICE0_AWAKE_STATUS REG_BIT(0)
+
+#define MISC_STATUS_0 XE_REG(0xa500)
+
#define FORCEWAKE_MEDIA_VDBOX(n) XE_REG(0xa540 + (n) * 4)
#define FORCEWAKE_MEDIA_VEBOX(n) XE_REG(0xa560 + (n) * 4)
#define FORCEWAKE_GSC XE_REG(0xa618)
+#define XELP_GARBCNTL XE_REG(0xb004)
+#define XELP_BUS_HASH_CTL_BIT_EXC REG_BIT(7)
+
#define XEHPC_LNCFMISCCFGREG0 XE_REG_MCR(0xb01c, XE_REG_OPTION_MASKED)
#define XEHPC_OVRLSCCC REG_BIT(0)
-/* L3 Cache Control */
#define LNCFCMOCS_REG_COUNT 32
#define XELP_LNCFCMOCS(i) XE_REG(0xb020 + (i) * 4)
#define XEHP_LNCFCMOCS(i) XE_REG_MCR(0xb020 + (i) * 4)
@@ -374,6 +392,18 @@
#define XEHP_L3NODEARBCFG XE_REG_MCR(0xb0b4)
#define XEHP_LNESPARE REG_BIT(19)
+#define LSN_VC_REG2 XE_REG_MCR(0xb0c8)
+#define LSN_LNI_WGT_MASK REG_GENMASK(31, 28)
+#define LSN_LNI_WGT(value) REG_FIELD_PREP(LSN_LNI_WGT_MASK, value)
+#define LSN_LNE_WGT_MASK REG_GENMASK(27, 24)
+#define LSN_LNE_WGT(value) REG_FIELD_PREP(LSN_LNE_WGT_MASK, value)
+#define LSN_DIM_X_WGT_MASK REG_GENMASK(23, 20)
+#define LSN_DIM_X_WGT(value) REG_FIELD_PREP(LSN_DIM_X_WGT_MASK, value)
+#define LSN_DIM_Y_WGT_MASK REG_GENMASK(19, 16)
+#define LSN_DIM_Y_WGT(value) REG_FIELD_PREP(LSN_DIM_Y_WGT_MASK, value)
+#define LSN_DIM_Z_WGT_MASK REG_GENMASK(15, 12)
+#define LSN_DIM_Z_WGT(value) REG_FIELD_PREP(LSN_DIM_Z_WGT_MASK, value)
+
#define L3SQCREG2 XE_REG_MCR(0xb104)
#define COMPMEMRD256BOVRFETCHEN REG_BIT(20)
@@ -434,6 +464,8 @@
#define SAMPLER_MODE XE_REG_MCR(0xe18c, XE_REG_OPTION_MASKED)
#define ENABLE_SMALLPL REG_BIT(15)
+#define SMP_WAIT_FETCH_MERGING_COUNTER REG_GENMASK(11, 10)
+#define SMP_FORCE_128B_OVERFETCH REG_FIELD_PREP(SMP_WAIT_FETCH_MERGING_COUNTER, 1)
#define SC_DISABLE_POWER_OPTIMIZATION_EBB REG_BIT(9)
#define SAMPLER_ENABLE_HEADLESS_MSG REG_BIT(5)
#define INDIRECT_STATE_BASE_ADDR_OVERRIDE REG_BIT(0)
@@ -462,6 +494,7 @@
#define TDL_TSL_CHICKEN XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
#define STK_ID_RESTRICT REG_BIT(12)
#define SLM_WMTP_RESTORE REG_BIT(11)
+#define RES_CHK_SPR_DIS REG_BIT(6)
#define ROW_CHICKEN XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
#define UGM_BACKUP_MODE REG_BIT(13)
@@ -487,6 +520,9 @@
#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11)
#define DIS_ATOMIC_CHAINING_TYPED_WRITES REG_BIT(3)
+#define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED)
+#define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12)
+
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
#define WR_REQ_CHAINING_DIS REG_BIT(26)
@@ -517,7 +553,7 @@
* [4-6] RSVD
* [7] Disabled
*/
-#define CCS_MODE XE_REG(0x14804)
+#define CCS_MODE XE_REG(0x14804, XE_REG_OPTION_MASKED)
#define CCS_MODE_CSLICE_0_3_MASK REG_GENMASK(11, 0) /* 3 bits per cslice */
#define CCS_MODE_CSLICE_MASK 0x7 /* CCS0-3 + rsvd */
#define CCS_MODE_CSLICE_WIDTH ilog2(CCS_MODE_CSLICE_MASK + 1)
@@ -556,62 +592,6 @@
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define VOLTAGE_MASK REG_GENMASK(10, 0)
-/*
- * Note: Interrupt registers 1900xx are VF accessible only until version 12.50.
- * On newer platforms, VFs are using memory-based interrupts instead.
- * However, for simplicity we keep this XE_REG_OPTION_VF tag intact.
- */
-
-#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF)
-#define INTR_GSC REG_BIT(31)
-#define INTR_GUC REG_BIT(25)
-#define INTR_MGUC REG_BIT(24)
-#define INTR_BCS8 REG_BIT(23)
-#define INTR_BCS(x) REG_BIT(15 - (x))
-#define INTR_CCS(x) REG_BIT(4 + (x))
-#define INTR_RCS0 REG_BIT(0)
-#define INTR_VECS(x) REG_BIT(31 - (x))
-#define INTR_VCS(x) REG_BIT(x)
-
-#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF)
-#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF)
-#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF)
-#define ENGINE1_MASK REG_GENMASK(31, 16)
-#define ENGINE0_MASK REG_GENMASK(15, 0)
-#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
-#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
-#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
-
-#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF)
-#define INTR_DATA_VALID REG_BIT(31)
-#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
-#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
-#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
-#define OTHER_GUC_INSTANCE 0
-#define OTHER_GSC_HECI2_INSTANCE 3
-#define OTHER_GSC_INSTANCE 6
-
-#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
-#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF)
-#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
-#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
-#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
-#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
-#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
-#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
-#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
-#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
-#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
-#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
-#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
-#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114)
-#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118)
-#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
-#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
-#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
-#define GSC_ER_COMPLETE REG_BIT(5)
-#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
-#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
-#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+#define SFC_DONE(n) XE_REG(0x1cc000 + (n) * 0x1000)
#endif
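
CCS_MODE above is now declared XE_REG_OPTION_MASKED. For masked registers, the upper 16 bits of the written value select which of the lower 16 bits are actually updated, so unrelated bits keep their current hardware value. A generic sketch of that convention (the helper name is made up):

/* Sketch of the masked-register write convention: only bits selected in
 * 'mask' are updated by the hardware, the rest are left untouched. */
static u32 masked_write_value(u16 mask, u16 bits)
{
	return ((u32)mask << 16) | bits;
}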
diff --git a/drivers/gpu/drm/xe/regs/xe_guc_regs.h b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
index a5fd14307f94..2118f7dec287 100644
--- a/drivers/gpu/drm/xe/regs/xe_guc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_guc_regs.h
@@ -84,6 +84,8 @@
#define HUC_LOADING_AGENT_GUC REG_BIT(1)
#define GUC_WOPCM_OFFSET_VALID REG_BIT(0)
#define GUC_MAX_IDLE_COUNT XE_REG(0xc3e4)
+#define GUC_PMTIMESTAMP_LO XE_REG(0xc3e8)
+#define GUC_PMTIMESTAMP_HI XE_REG(0xc3ec)
#define GUC_SEND_INTERRUPT XE_REG(0xc4c8)
#define GUC_SEND_TRIGGER REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
new file mode 100644
index 000000000000..f0ecfcac4003
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_IRQ_REGS_H_
+#define _XE_IRQ_REGS_H_
+
+#include "regs/xe_reg_defs.h"
+
+#define PCU_IRQ_OFFSET 0x444e0
+#define GU_MISC_IRQ_OFFSET 0x444f0
+#define GU_MISC_GSE REG_BIT(27)
+
+#define DG1_MSTR_TILE_INTR XE_REG(0x190008)
+#define DG1_MSTR_IRQ REG_BIT(31)
+#define DG1_MSTR_TILE(t) REG_BIT(t)
+
+#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
+#define MASTER_IRQ REG_BIT(31)
+#define GU_MISC_IRQ REG_BIT(29)
+#define DISPLAY_IRQ REG_BIT(16)
+#define GT_DW_IRQ(x) REG_BIT(x)
+
+/*
+ * Note: Interrupt registers 1900xx are VF accessible only until version 12.50.
+ * On newer platforms, VFs are using memory-based interrupts instead.
+ * However, for simplicity we keep this XE_REG_OPTION_VF tag intact.
+ */
+
+#define GT_INTR_DW(x) XE_REG(0x190018 + ((x) * 4), XE_REG_OPTION_VF)
+#define INTR_GSC REG_BIT(31)
+#define INTR_GUC REG_BIT(25)
+#define INTR_MGUC REG_BIT(24)
+#define INTR_BCS8 REG_BIT(23)
+#define INTR_BCS(x) REG_BIT(15 - (x))
+#define INTR_CCS(x) REG_BIT(4 + (x))
+#define INTR_RCS0 REG_BIT(0)
+#define INTR_VECS(x) REG_BIT(31 - (x))
+#define INTR_VCS(x) REG_BIT(x)
+
+#define RENDER_COPY_INTR_ENABLE XE_REG(0x190030, XE_REG_OPTION_VF)
+#define VCS_VECS_INTR_ENABLE XE_REG(0x190034, XE_REG_OPTION_VF)
+#define GUC_SG_INTR_ENABLE XE_REG(0x190038, XE_REG_OPTION_VF)
+#define ENGINE1_MASK REG_GENMASK(31, 16)
+#define ENGINE0_MASK REG_GENMASK(15, 0)
+#define GPM_WGBOXPERF_INTR_ENABLE XE_REG(0x19003c, XE_REG_OPTION_VF)
+#define CRYPTO_RSVD_INTR_ENABLE XE_REG(0x190040)
+#define GUNIT_GSC_INTR_ENABLE XE_REG(0x190044, XE_REG_OPTION_VF)
+#define CCS_RSVD_INTR_ENABLE XE_REG(0x190048, XE_REG_OPTION_VF)
+
+#define INTR_IDENTITY_REG(x) XE_REG(0x190060 + ((x) * 4), XE_REG_OPTION_VF)
+#define INTR_DATA_VALID REG_BIT(31)
+#define INTR_ENGINE_INSTANCE(x) REG_FIELD_GET(GENMASK(25, 20), x)
+#define INTR_ENGINE_CLASS(x) REG_FIELD_GET(GENMASK(18, 16), x)
+#define INTR_ENGINE_INTR(x) REG_FIELD_GET(GENMASK(15, 0), x)
+#define OTHER_GUC_INSTANCE 0
+#define OTHER_GSC_HECI2_INSTANCE 3
+#define OTHER_KCR_INSTANCE 4
+#define OTHER_GSC_INSTANCE 6
+
+#define IIR_REG_SELECTOR(x) XE_REG(0x190070 + ((x) * 4), XE_REG_OPTION_VF)
+#define RCS0_RSVD_INTR_MASK XE_REG(0x190090, XE_REG_OPTION_VF)
+#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
+#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
+#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
+#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
+#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
+#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
+#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
+#define CRYPTO_RSVD_INTR_MASK XE_REG(0x1900f0)
+#define GUNIT_GSC_INTR_MASK XE_REG(0x1900f4, XE_REG_OPTION_VF)
+#define CCS0_CCS1_INTR_MASK XE_REG(0x190100)
+#define CCS2_CCS3_INTR_MASK XE_REG(0x190104)
+#define XEHPC_BCS1_BCS2_INTR_MASK XE_REG(0x190110)
+#define XEHPC_BCS3_BCS4_INTR_MASK XE_REG(0x190114)
+#define XEHPC_BCS5_BCS6_INTR_MASK XE_REG(0x190118)
+#define XEHPC_BCS7_BCS8_INTR_MASK XE_REG(0x19011c)
+#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
+#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
+#define GSC_ER_COMPLETE REG_BIT(5)
+#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
+#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
+#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+
+/* irqs for OTHER_KCR_INSTANCE */
+#define KCR_PXP_STATE_TERMINATED_INTERRUPT REG_BIT(1)
+#define KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT REG_BIT(2)
+#define KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT REG_BIT(3)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 045dfd09db99..994af591a2e8 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -11,7 +11,9 @@
#define CTX_RING_TAIL (0x06 + 1)
#define CTX_RING_START (0x08 + 1)
#define CTX_RING_CTL (0x0a + 1)
+#define CTX_BB_PER_CTX_PTR (0x12 + 1)
#define CTX_TIMESTAMP (0x22 + 1)
+#define CTX_TIMESTAMP_UDW (0x24 + 1)
#define CTX_INDIRECT_RING_STATE (0x26 + 1)
#define CTX_PDP0_UDW (0x30 + 1)
#define CTX_PDP0_LDW (0x32 + 1)
@@ -25,6 +27,9 @@
#define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3)
#define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4)
+#define CTX_CS_INT_VEC_REG 0x5a
+#define CTX_CS_INT_VEC_DATA (CTX_CS_INT_VEC_REG + 1)
+
#define INDIRECT_CTX_RING_HEAD (0x02 + 1)
#define INDIRECT_CTX_RING_TAIL (0x04 + 1)
#define INDIRECT_CTX_RING_START (0x06 + 1)
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index 519dd1067a19..ef2bf984723f 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -34,11 +34,15 @@
#define PCU_CR_PACKAGE_ENERGY_STATUS XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x593c)
+#define PCU_CR_PACKAGE_TEMPERATURE XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5978)
+#define TEMP_MASK REG_GENMASK(7, 0)
+
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
-#define PKG_PWR_LIM_1 REG_GENMASK(14, 0)
-#define PKG_PWR_LIM_1_EN REG_BIT(15)
-#define PKG_PWR_LIM_1_TIME REG_GENMASK(23, 17)
-#define PKG_PWR_LIM_1_TIME_X REG_GENMASK(23, 22)
-#define PKG_PWR_LIM_1_TIME_Y REG_GENMASK(21, 17)
+#define PWR_LIM_VAL REG_GENMASK(14, 0)
+#define PWR_LIM_EN REG_BIT(15)
+#define PWR_LIM REG_GENMASK(15, 0)
+#define PWR_LIM_TIME REG_GENMASK(23, 17)
+#define PWR_LIM_TIME_X REG_GENMASK(23, 22)
+#define PWR_LIM_TIME_Y REG_GENMASK(21, 17)
#endif /* _XE_MCHBAR_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a9b0091cb7ee..a79ad2da070c 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -41,14 +41,6 @@
#define OAG_OABUFFER XE_REG(0xdb08)
#define OABUFFER_SIZE_MASK REG_GENMASK(5, 3)
-#define OABUFFER_SIZE_128K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 0)
-#define OABUFFER_SIZE_256K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 1)
-#define OABUFFER_SIZE_512K REG_FIELD_PREP(OABUFFER_SIZE_MASK, 2)
-#define OABUFFER_SIZE_1M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 3)
-#define OABUFFER_SIZE_2M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 4)
-#define OABUFFER_SIZE_4M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 5)
-#define OABUFFER_SIZE_8M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 6)
-#define OABUFFER_SIZE_16M REG_FIELD_PREP(OABUFFER_SIZE_MASK, 7)
#define OAG_OABUFFER_MEMORY_SELECT REG_BIT(0) /* 0: PPGTT, 1: GGTT */
#define OAG_OACONTROL XE_REG(0xdaf4)
@@ -59,10 +51,15 @@
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
+#define OAG_OACONTROL_USED_BITS \
+ (OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
+ OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
+ OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
#define OAG_OA_DEBUG_START_TRIGGER_SCOPE_CONTROL REG_BIT(13)
+#define OAG_OA_DEBUG_BUF_SIZE_SELECT REG_BIT(12)
#define OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL REG_BIT(8)
#define OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL REG_BIT(7)
#define OAG_OA_DEBUG_INCLUDE_CLK_RATIO REG_BIT(6)
@@ -85,6 +82,8 @@
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
+#define OAM_OACONTROL_USED_BITS \
+ (OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index 0b0b49d850ae..c556a04670ee 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -18,11 +18,12 @@
#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-#define BMG_PACKAGE_POWER_SKU XE_REG(0x138098)
-#define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc)
#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
-#define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440)
+#define BMG_FAN_1_SPEED XE_REG(0x138140)
+#define BMG_FAN_2_SPEED XE_REG(0x138170)
+#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
+#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
+#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458)
-#define BMG_PLATFORM_POWER_LIMIT XE_REG(0x138460)
#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
new file mode 100644
index 000000000000..f45abcd96ba8
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_PMT_H_
+#define _XE_PMT_H_
+
+#define SOC_BASE 0x280000
+
+#define BMG_PMT_BASE_OFFSET 0xDB000
+#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+
+#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
+#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
+
+#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
+#define SG_REMAP_BITS REG_GENMASK(31, 24)
+
+#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pxp_regs.h b/drivers/gpu/drm/xe/regs/xe_pxp_regs.h
new file mode 100644
index 000000000000..aa158938b42e
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_pxp_regs.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_REGS_H__
+#define __XE_PXP_REGS_H__
+
+#include "regs/xe_regs.h"
+
+/* The following registers are only valid on platforms with a media GT */
+
+/* KCR enable/disable control */
+#define KCR_INIT XE_REG(0x3860f0)
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+/* KCR hwdrm session in play status 0-31 */
+#define KCR_SIP XE_REG(0x386260)
+
+/* PXP global terminate register for session termination */
+#define KCR_GLOBAL_TERMINATE XE_REG(0x3860f8)
+
+#endif /* __XE_PXP_REGS_H__ */
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index 23f7dc5bbe99..c39aab843e35 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -7,13 +7,25 @@
#define _XE_REG_DEFS_H_
#include <linux/build_bug.h>
+#include <linux/log2.h>
+#include <linux/sizes.h>
#include "compat-i915-headers/i915_reg_defs.h"
/**
+ * XE_REG_ADDR_MAX - The upper limit on MMIO register address
+ *
+ * This macro specifies the upper limit (not inclusive) on MMIO register offset
+ * supported by struct xe_reg and functions based on struct xe_mmio.
+ *
+ * Currently this is defined as 4 MiB.
+ */
+#define XE_REG_ADDR_MAX SZ_4M
+
+/**
* struct xe_reg - Register definition
*
- * Register defintion to be used by the individual register. Although the same
+ * Register definition to be used by the individual register. Although the same
* definition is used for xe_reg and xe_reg_mcr, they use different internal
* APIs for accesses.
*/
@@ -21,7 +33,7 @@ struct xe_reg {
union {
struct {
/** @addr: address */
- u32 addr:28;
+ u32 addr:const_ilog2(XE_REG_ADDR_MAX);
/**
* @masked: register is "masked", with upper 16bits used
* to identify the bits that are updated on the lower
@@ -41,10 +53,6 @@ struct xe_reg {
* @vf: register is accessible from the Virtual Function.
*/
u32 vf:1;
- /**
- * @ext: access MMIO extension space for current register.
- */
- u32 ext:1;
};
/** @raw: Raw value with both address and options */
u32 raw;
@@ -112,23 +120,13 @@ struct xe_reg_mcr {
#define XE_REG(r_, ...) ((const struct xe_reg)XE_REG_INITIALIZER(r_, ##__VA_ARGS__))
/**
- * XE_REG_EXT - Create a struct xe_reg from extension offset and additional
- * flags
- * @r_: Register extension offset
- * @...: Additional options like access mode. See struct xe_reg for available
- * options.
- */
-#define XE_REG_EXT(r_, ...) \
- ((const struct xe_reg)XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .ext = 1))
-
-/**
* XE_REG_MCR - Create a struct xe_reg_mcr from offset and additional flags
* @r_: Register offset
* @...: Additional options like access mode. See struct xe_reg for available
* options.
*/
#define XE_REG_MCR(r_, ...) ((const struct xe_reg_mcr){ \
- .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
+ .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \
})
static inline bool xe_reg_is_valid(struct xe_reg r)
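
Since XE_REG_ADDR_MAX is SZ_4M, const_ilog2() resolves to 22 at compile time, so the addr bitfield spans offsets 0x0..0x3fffff and leaves the remaining bits for the flag fields. A compile-time check along these lines (not part of the patch, using the <linux/log2.h> and <linux/sizes.h> helpers the header now includes) captures the arithmetic:

/* Sketch: SZ_4M == 1 << 22, so a 22-bit addr field covers every supported offset. */
static_assert(const_ilog2(SZ_4M) == 22);
static_assert(XE_REG_ADDR_MAX - 1 == 0x3fffff);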
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index dfa869f0dddd..3abb17d2ca33 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -7,14 +7,6 @@
#include "regs/xe_reg_defs.h"
-#define TIMESTAMP_OVERRIDE XE_REG(0x44074)
-#define TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK REG_GENMASK(15, 12)
-#define TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK REG_GENMASK(9, 0)
-
-#define PCU_IRQ_OFFSET 0x444e0
-#define GU_MISC_IRQ_OFFSET 0x444f0
-#define GU_MISC_GSE REG_BIT(27)
-
#define GU_CNTL_PROTECTED XE_REG(0x10100C)
#define DRIVERINT_FLR_DIS REG_BIT(31)
@@ -48,25 +40,19 @@
#define MTL_RP_STATE_CAP XE_REG(0x138000)
+#define MTL_GT_RPA_FREQUENCY XE_REG(0x138008)
#define MTL_GT_RPE_FREQUENCY XE_REG(0x13800c)
#define MTL_MEDIAP_STATE_CAP XE_REG(0x138020)
#define MTL_RPN_CAP_MASK REG_GENMASK(24, 16)
#define MTL_RP0_CAP_MASK REG_GENMASK(8, 0)
+#define MTL_MPA_FREQUENCY XE_REG(0x138028)
+#define MTL_RPA_MASK REG_GENMASK(8, 0)
+
#define MTL_MPE_FREQUENCY XE_REG(0x13802c)
#define MTL_RPE_MASK REG_GENMASK(8, 0)
-#define DG1_MSTR_TILE_INTR XE_REG(0x190008)
-#define DG1_MSTR_IRQ REG_BIT(31)
-#define DG1_MSTR_TILE(t) REG_BIT(t)
-
-#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
-#define MASTER_IRQ REG_BIT(31)
-#define GU_MISC_IRQ REG_BIT(29)
-#define DISPLAY_IRQ REG_BIT(16)
-#define GT_DW_IRQ(x) REG_BIT(x)
-
#define VF_CAP_REG XE_REG(0x1901f8, XE_REG_OPTION_VF)
#define VF_CAP REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 8dac069483e8..378dcd0fb414 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -6,6 +6,13 @@
#include <kunit/test.h>
#include <kunit/visibility.h>
+#include <linux/iosys-map.h>
+#include <linux/math64.h>
+#include <linux/prandom.h>
+#include <linux/swap.h>
+
+#include <uapi/linux/sysinfo.h>
+
#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"
#include "tests/xe_test.h"
@@ -42,11 +49,18 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
KUNIT_FAIL(test, "Failed to submit bo clear.\n");
return PTR_ERR(fence);
}
+
+ if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) {
+ dma_fence_put(fence);
+ KUNIT_FAIL(test, "Timeout while clearing bo.\n");
+ return -ETIME;
+ }
+
dma_fence_put(fence);
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo, true);
+ ret = xe_bo_evict(bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -238,7 +252,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for_each_gt(__gt, xe, id)
xe_gt_sanitize(__gt);
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
/*
* Snapshotting the CTB and copying back a potentially old
* version seems risky, depending on what might have been
@@ -250,17 +264,16 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
* however seems quite fragile not to also restart the GT. Try
* to do that here by triggering a GT reset.
*/
- for_each_gt(__gt, xe, id) {
- xe_gt_reset_async(__gt);
- flush_work(&__gt->reset.worker);
- }
+ for_each_gt(__gt, xe, id)
+ xe_gt_reset(__gt);
+
if (err) {
KUNIT_FAIL(test, "restore kernel err=%pe\n",
ERR_PTR(err));
goto cleanup_all;
}
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err) {
KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err));
goto cleanup_all;
@@ -358,6 +371,241 @@ static void xe_bo_evict_kunit(struct kunit *test)
evict_test_run_device(xe);
}
+struct xe_bo_link {
+ struct list_head link;
+ struct xe_bo *bo;
+ u32 val;
+};
+
+#define XE_BO_SHRINK_SIZE ((unsigned long)SZ_64M)
+
+static int shrink_test_fill_random(struct xe_bo *bo, struct rnd_state *state,
+ struct xe_bo_link *link)
+{
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t __maybe_unused i;
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ iosys_map_wr(&map, i, u32, val);
+ if (i == 0)
+ link->val = val;
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+ return 0;
+}
+
+static bool shrink_test_verify(struct kunit *test, struct xe_bo *bo,
+ unsigned int bo_nr, struct rnd_state *state,
+ struct xe_bo_link *link)
+{
+ struct iosys_map map;
+ int ret = ttm_bo_vmap(&bo->ttm, &map);
+ size_t i;
+ bool failed = false;
+
+ if (ret) {
+ KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr);
+ return true;
+ }
+
+ for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
+ u32 val = prandom_u32_state(state);
+
+ if (iosys_map_rd(&map, i, u32) != val) {
+ KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx",
+ bo_nr, (unsigned long long)i);
+ kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n",
+ (unsigned int)iosys_map_rd(&map, i, u32), val);
+ if (i == 0 && val != link->val)
+ kunit_info(test, "Looks like PRNG is out of sync.\n");
+ failed = true;
+ break;
+ }
+ }
+
+ ttm_bo_vunmap(&bo->ttm, &map);
+
+ return failed;
+}
+
+/*
+ * Try to create system bos corresponding to twice the amount
+ * of available system memory to test shrinker functionality.
+ * If no swap space is available to accommodate the
+ * memory overcommit, mark bos purgeable.
+ */
+static int shrink_test_run_device(struct xe_device *xe)
+{
+ struct kunit *test = kunit_get_current_test();
+ LIST_HEAD(bos);
+ struct xe_bo_link *link, *next;
+ struct sysinfo si;
+ u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit;
+ unsigned int interrupted = 0, successful = 0, count = 0;
+ struct rnd_state prng;
+ u64 rand_seed;
+ bool failed = false;
+
+ rand_seed = get_random_u64();
+ prandom_seed_state(&prng, rand_seed);
+ kunit_info(test, "Random seed is 0x%016llx.\n",
+ (unsigned long long)rand_seed);
+
+ /* Skip if execution time is expected to be too long. */
+
+ limit = SZ_32G;
+ /* IGFX with flat CCS needs to copy when swapping / shrinking */
+ if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
+ limit = SZ_16G;
+
+ si_meminfo(&si);
+ ram = (size_t)si.freeram * si.mem_unit;
+ if (ram > limit) {
+ kunit_skip(test, "Too long expected execution time.\n");
+ return 0;
+ }
+ to_alloc = ram * 2;
+
+ ram_and_swap = ram + get_nr_swap_pages() * PAGE_SIZE;
+ if (to_alloc > ram_and_swap)
+ purgeable = to_alloc - ram_and_swap;
+ purgeable += div64_u64(purgeable, 5);
+
+ kunit_info(test, "Free ram is %lu bytes. Will allocate twice of that.\n",
+ (unsigned long)ram);
+ for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) {
+ struct xe_bo *bo;
+ unsigned int mem_type;
+ struct xe_ttm_tt *xe_tt;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link) {
+ KUNIT_FAIL(test, "Unexpected link allocation failure\n");
+ failed = true;
+ break;
+ }
+
+ INIT_LIST_HEAD(&link->link);
+
+ /* We can create bos using WC caching here. But it is slower. */
+ bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
+ DRM_XE_GEM_CPU_CACHING_WB,
+ XE_BO_FLAG_SYSTEM);
+ if (IS_ERR(bo)) {
+ if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
+ bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
+ KUNIT_FAIL(test, "Error creating bo: %pe\n", bo);
+ kfree(link);
+ failed = true;
+ break;
+ }
+ xe_bo_lock(bo, false);
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+
+ /*
+ * Allocate purgeable bos first, because if we do it the
+ * other way around, they may not be subject to swapping...
+ */
+ if (alloced < purgeable) {
+ xe_ttm_tt_account_subtract(&xe_tt->ttm);
+ xe_tt->purgeable = true;
+ xe_ttm_tt_account_add(&xe_tt->ttm);
+ bo->ttm.priority = 0;
+ spin_lock(&bo->ttm.bdev->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->ttm);
+ spin_unlock(&bo->ttm.bdev->lru_lock);
+ } else {
+ int ret = shrink_test_fill_random(bo, &prng, link);
+
+ if (ret) {
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ KUNIT_FAIL(test, "Error filling bo with random data: %pe\n",
+ ERR_PTR(ret));
+ kfree(link);
+ failed = true;
+ break;
+ }
+ }
+
+ mem_type = bo->ttm.resource->mem_type;
+ xe_bo_unlock(bo);
+ link->bo = bo;
+ list_add_tail(&link->link, &bos);
+
+ if (mem_type != XE_PL_TT) {
+ KUNIT_FAIL(test, "Bo in incorrect memory type: %u\n",
+ bo->ttm.resource->mem_type);
+ failed = true;
+ }
+ cond_resched();
+ if (signal_pending(current))
+ break;
+ }
+
+ /*
+ * Read back and destroy bos. Reset the pseudo-random seed to get an
+ * identical pseudo-random number sequence for readback.
+ */
+ prandom_seed_state(&prng, rand_seed);
+ list_for_each_entry_safe(link, next, &bos, link) {
+ static struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct xe_bo *bo = link->bo;
+ struct xe_ttm_tt *xe_tt;
+ int ret;
+
+ count++;
+ if (!signal_pending(current) && !failed) {
+ bool purgeable, intr = false;
+
+			xe_bo_lock(bo, false);
+
+ /* xe_tt->purgeable is cleared on validate. */
+ xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
+ purgeable = xe_tt->purgeable;
+ do {
+ ret = ttm_bo_validate(&bo->ttm, &tt_placement, &ctx);
+ if (ret == -EINTR)
+ intr = true;
+ } while (ret == -EINTR && !signal_pending(current));
+ if (!ret && !purgeable)
+ failed = shrink_test_verify(test, bo, count, &prng, link);
+
+ xe_bo_unlock(bo);
+ if (ret) {
+ KUNIT_FAIL(test, "Validation failed: %pe\n",
+ ERR_PTR(ret));
+ failed = true;
+ } else if (intr) {
+ interrupted++;
+ } else {
+ successful++;
+ }
+ }
+ xe_bo_put(link->bo);
+ list_del(&link->link);
+ kfree(link);
+ }
+ kunit_info(test, "Readbacks interrupted: %u successful: %u\n",
+ interrupted, successful);
+
+ return 0;
+}
+
+static void xe_bo_shrink_kunit(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ shrink_test_run_device(xe);
+}
+
static struct kunit_case xe_bo_tests[] = {
KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param),
KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param),
@@ -371,3 +619,17 @@ struct kunit_suite xe_bo_test_suite = {
.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite);
+
+static struct kunit_case xe_bo_shrink_test[] = {
+ KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param,
+ {.speed = KUNIT_SPEED_SLOW}),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_bo_shrink_test_suite = {
+ .name = "xe_bo_shrink",
+ .test_cases = xe_bo_shrink_test,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_bo_shrink_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index cedd3e88a6fb..c53f67ce4b0a 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -65,7 +65,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
* the exporter and the importer should be the same bo.
*/
swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported, true);
+ ret = xe_bo_evict(exported);
swap(exported->ttm.base.dma_buf, dmabuf);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
new file mode 100644
index 000000000000..6faffcd74869
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_device.h"
+#include "xe_ggtt.h"
+#include "xe_guc_ct.h"
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define DUT_GGTT_START SZ_1M
+#define DUT_GGTT_SIZE SZ_2M
+
+static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *xe,
+ struct xe_tile *tile,
+ size_t size, u32 flags)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct xe_bo *bo;
+ void *buf;
+
+ bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+
+ buf = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ bo->tile = tile;
+ bo->ttm.bdev = &xe->ttm;
+ bo->size = size;
+ iosys_map_set_vaddr(&bo->vmap, buf);
+
+ if (flags & XE_BO_FLAG_GGTT) {
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ bo->ggtt_node[tile->id] = xe_ggtt_node_init(ggtt);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
+
+ KUNIT_ASSERT_EQ(test, 0,
+ drm_mm_insert_node_in_range(&ggtt->mm,
+ &bo->ggtt_node[tile->id]->base,
+ bo->size, SZ_4K,
+ 0, 0, U64_MAX, 0));
+ }
+
+ return bo;
+}
+
+static int guc_buf_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* some random platform */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_ggtt *ggtt;
+ struct xe_guc *guc;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
+ guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
+
+ drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
+ mutex_init(&ggtt->lock);
+
+ kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
+ replacement_xe_managed_bo_create_pin_map);
+
+ KUNIT_ASSERT_EQ(test, 0, xe_guc_buf_cache_init(&guc->buf));
+
+ test->priv = &guc->buf;
+ return 0;
+}
+
+static void test_smallest(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, 1);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ xe_guc_buf_release(buf);
+}
+
+static void test_largest(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ xe_guc_buf_release(buf);
+}
+
+static void test_granular(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf *bufs;
+ int n, dwords;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, bufs);
+
+ for (n = 0; n < dwords; n++)
+ bufs[n] = xe_guc_buf_reserve(cache, 1);
+
+ for (n = 0; n < dwords; n++)
+ KUNIT_EXPECT_TRUE_MSG(test, xe_guc_buf_is_valid(bufs[n]), "n=%d", n);
+
+ for (n = 0; n < dwords; n++)
+ xe_guc_buf_release(bufs[n]);
+}
+
+static void test_unique(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf *bufs;
+ int n, m, dwords;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, bufs);
+
+ for (n = 0; n < dwords; n++)
+ bufs[n] = xe_guc_buf_reserve(cache, 1);
+
+ for (n = 0; n < dwords; n++) {
+ for (m = n + 1; m < dwords; m++) {
+ KUNIT_EXPECT_PTR_NE_MSG(test, xe_guc_buf_cpu_ptr(bufs[n]),
+ xe_guc_buf_cpu_ptr(bufs[m]), "n=%d, m=%d", n, m);
+ KUNIT_ASSERT_NE_MSG(test, xe_guc_buf_gpu_addr(bufs[n]),
+ xe_guc_buf_gpu_addr(bufs[m]), "n=%d, m=%d", n, m);
+ }
+ }
+
+ for (n = 0; n < dwords; n++)
+ xe_guc_buf_release(bufs[n]);
+}
+
+static void test_overlap(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf b1, b2;
+ u32 dwords = xe_guc_buf_cache_dwords(cache) / 2;
+ u32 bytes = dwords * sizeof(u32);
+ void *p1, *p2;
+ u64 a1, a2;
+
+ b1 = xe_guc_buf_reserve(cache, dwords);
+ b2 = xe_guc_buf_reserve(cache, dwords);
+
+ p1 = xe_guc_buf_cpu_ptr(b1);
+ p2 = xe_guc_buf_cpu_ptr(b2);
+
+ a1 = xe_guc_buf_gpu_addr(b1);
+ a2 = xe_guc_buf_gpu_addr(b2);
+
+ KUNIT_EXPECT_PTR_NE(test, p1, p2);
+ if (p1 < p2)
+ KUNIT_EXPECT_LT(test, (uintptr_t)(p1 + bytes - 1), (uintptr_t)p2);
+ else
+ KUNIT_EXPECT_LT(test, (uintptr_t)(p2 + bytes - 1), (uintptr_t)p1);
+
+ KUNIT_EXPECT_NE(test, a1, a2);
+ if (a1 < a2)
+ KUNIT_EXPECT_LT(test, a1 + bytes - 1, a2);
+ else
+ KUNIT_EXPECT_LT(test, a2 + bytes - 1, a1);
+
+ xe_guc_buf_release(b1);
+ xe_guc_buf_release(b2);
+}
+
+static void test_reusable(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf b1, b2;
+ void *p1;
+ u64 a1;
+
+ b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1));
+ KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1));
+ KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1));
+ xe_guc_buf_release(b1);
+
+ b2 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
+ KUNIT_EXPECT_PTR_EQ(test, p1, xe_guc_buf_cpu_ptr(b2));
+ KUNIT_EXPECT_EQ(test, a1, xe_guc_buf_gpu_addr(b2));
+ xe_guc_buf_release(b2);
+}
+
+static void test_too_big(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+
+ buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache) + 1);
+ KUNIT_EXPECT_FALSE(test, xe_guc_buf_is_valid(buf));
+ xe_guc_buf_release(buf); /* shouldn't crash */
+}
+
+static void test_flush(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ const u32 dwords = xe_guc_buf_cache_dwords(cache);
+ const u32 bytes = dwords * sizeof(u32);
+ u32 *s, *p, *d;
+ int n;
+
+ KUNIT_ASSERT_NOT_NULL(test, s = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
+ KUNIT_ASSERT_NOT_NULL(test, d = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
+
+ for (n = 0; n < dwords; n++)
+ s[n] = n;
+
+ buf = xe_guc_buf_reserve(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_PTR_NE(test, p, s);
+ KUNIT_EXPECT_PTR_NE(test, p, d);
+
+ memcpy(p, s, bytes);
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_flush(buf));
+
+ iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes);
+ KUNIT_EXPECT_MEMEQ(test, s, d, bytes);
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_lookup(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ u32 dwords;
+ u64 addr;
+ u32 *p;
+ int n;
+
+ dwords = xe_guc_buf_cache_dwords(cache);
+ buf = xe_guc_buf_reserve(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_ASSERT_NE(test, 0, addr = xe_guc_buf_gpu_addr(buf));
+
+ KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p - 1, sizeof(u32)));
+ KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p + dwords, sizeof(u32)));
+
+ for (n = 0; n < dwords; n++)
+ KUNIT_EXPECT_EQ_MSG(test, xe_guc_cache_gpu_addr_from_ptr(cache, p + n, sizeof(u32)),
+ addr + n * sizeof(u32), "n=%d", n);
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_data(struct kunit *test)
+{
+ static const u32 data[] = { 1, 2, 3, 4, 5, 6 };
+ struct xe_guc_buf_cache *cache = test->priv;
+ struct xe_guc_buf buf;
+ void *p;
+
+ buf = xe_guc_buf_from_data(cache, data, sizeof(data));
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_MEMEQ(test, p, data, sizeof(data));
+
+ xe_guc_buf_release(buf);
+}
+
+static void test_class(struct kunit *test)
+{
+ struct xe_guc_buf_cache *cache = test->priv;
+ u32 dwords = xe_guc_buf_cache_dwords(cache);
+
+ {
+ CLASS(xe_guc_buf, buf)(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ }
+
+ {
+ CLASS(xe_guc_buf, buf)(cache, dwords);
+ KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
+ KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
+ KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
+ KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
+ }
+}
+
+static struct kunit_case guc_buf_test_cases[] = {
+ KUNIT_CASE(test_smallest),
+ KUNIT_CASE(test_largest),
+ KUNIT_CASE(test_granular),
+ KUNIT_CASE(test_unique),
+ KUNIT_CASE(test_overlap),
+ KUNIT_CASE(test_reusable),
+ KUNIT_CASE(test_too_big),
+ KUNIT_CASE(test_flush),
+ KUNIT_CASE(test_lookup),
+ KUNIT_CASE(test_data),
+ KUNIT_CASE(test_class),
+ {}
+};
+
+static struct kunit_suite guc_buf_suite = {
+ .name = "guc_buf",
+ .test_cases = guc_buf_test_cases,
+ .init = guc_buf_test_init,
+};
+
+kunit_test_suites(&guc_buf_suite);
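The suite above exercises the GuC buffer cache (xe_guc_buf) API end to end. Below is a condensed, illustrative sketch of the reserve/fill/flush/release cycle those tests rely on; the send_h2g_payload() helper and its error code are ours, not part of the driver, and error handling and includes are trimmed:

static int send_h2g_payload(struct xe_guc_buf_cache *cache, const u32 *payload, u32 dwords)
{
	struct xe_guc_buf buf = xe_guc_buf_reserve(cache, dwords);
	u32 *cpu;
	u64 gpu;

	if (!xe_guc_buf_is_valid(buf))
		return -ENOBUFS;

	cpu = xe_guc_buf_cpu_ptr(buf);
	memcpy(cpu, payload, dwords * sizeof(u32));

	/* Push the CPU writes to the backing bo; the tests only check this returns non-zero. */
	xe_guc_buf_flush(buf);

	gpu = xe_guc_buf_gpu_addr(buf);
	/* ... hand @gpu to the GuC here ... */
	(void)gpu;

	xe_guc_buf_release(buf);
	return 0;
}

The CLASS(xe_guc_buf, ...) form used in test_class() releases the reservation automatically when the variable goes out of scope.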
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index 5f14737c8210..81277c77016d 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -6,11 +6,13 @@
#include <kunit/test.h>
extern struct kunit_suite xe_bo_test_suite;
+extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
kunit_test_suite(xe_bo_test_suite);
+kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
@@ -18,4 +20,4 @@ kunit_test_suite(xe_mocs_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe live kunit tests");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index 1a192a2a941b..4a65e3103f77 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -83,7 +83,8 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
bo->size,
ttm_bo_type_kernel,
region |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
@@ -201,8 +202,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -210,8 +210,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -221,11 +220,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_VRAM_IF_DGFX(tile));
if (IS_ERR(tiny)) {
- KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
- PTR_ERR(pt));
+ KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
+ PTR_ERR(tiny));
goto free_pt;
}
@@ -511,7 +509,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo, true);
+ ret = xe_bo_evict(vram_bo);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
@@ -642,7 +640,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -666,7 +666,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -690,7 +691,8 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 79be73b4a02b..0e502feaca81 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -43,23 +43,25 @@ static void read_l3cc_table(struct xe_gt *gt,
{
struct kunit *test = kunit_get_current_test();
u32 l3cc, l3cc_expected;
- unsigned int i;
+ unsigned int fw_ref, i;
u32 reg_val;
- u32 ret;
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ KUNIT_ASSERT_TRUE_MSG(test, false, "Forcewake Failed.\n");
+ }
for (i = 0; i < info->num_mocs_regs; i++) {
if (!(i & 1)) {
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i >> 1));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i >> 1));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i >> 1));
mocs_dbg(gt, "reg_val=0x%x\n", reg_val);
} else {
- /* Just re-use value read on previous iteration */
+ /* Just reuse value read on previous iteration */
reg_val >>= 16;
}
@@ -72,7 +74,7 @@ static void read_l3cc_table(struct xe_gt *gt,
KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc,
"l3cc idx=%u has incorrect val.\n", i);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void read_mocs_table(struct xe_gt *gt,
@@ -80,21 +82,20 @@ static void read_mocs_table(struct xe_gt *gt,
{
struct kunit *test = kunit_get_current_test();
u32 mocs, mocs_expected;
- unsigned int i;
+ unsigned int fw_ref, i;
u32 reg_val;
- u32 ret;
KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index,
"Unused entries index should have been defined\n");
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n");
for (i = 0; i < info->num_mocs_regs; i++) {
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
mocs_expected = get_entry_control(info, i);
mocs = reg_val;
@@ -106,7 +107,7 @@ static void read_mocs_table(struct xe_gt *gt,
"mocs reg 0x%x has incorrect val.\n", i);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static int mocs_kernel_test_run_device(struct xe_device *xe)
@@ -164,8 +165,7 @@ static int mocs_reset_test_run_device(struct xe_device *xe)
if (flags & HAS_LNCF_MOCS)
read_l3cc_table(gt, &mocs.table);
- xe_gt_reset_async(gt);
- flush_work(&gt->reset.worker);
+ xe_gt_reset(gt);
kunit_info(test, "mocs_reset_test after reset\n");
if (flags & HAS_GLOBAL_MOCS)
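The conversion above moves the MOCS tests to the reference-counted forcewake API: xe_force_wake_get() now returns a domain-reference cookie instead of an error code, xe_force_wake_ref_has_domain() reports whether the requested domains were actually acquired, and the same cookie is handed back to xe_force_wake_put(). A minimal sketch of that pattern as used in read_l3cc_table(); the wrapper name and the error code are illustrative only:

static int with_gt_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		/* Drop whatever subset of domains was acquired and bail out. */
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	/* ... access registers that need the GT awake ... */

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;
}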
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 67404863087e..1d3e2e50c355 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -21,15 +21,15 @@
*/
void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
{
- const struct xe_graphics_desc *ip, *last = NULL;
+ const struct xe_graphics_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
- ip = graphics_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
+ desc = graphics_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
@@ -43,15 +43,15 @@ EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
*/
void xe_call_for_each_media_ip(xe_media_fn xe_fn)
{
- const struct xe_media_desc *ip, *last = NULL;
+ const struct xe_media_desc *desc, *last = NULL;
- for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
- ip = media_ip_map[i].ip;
- if (ip == last)
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
+ desc = media_ips[i].desc;
+ if (desc == last)
continue;
- xe_fn(ip);
- last = ip;
+ xe_fn(desc);
+ last = desc;
}
}
EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
@@ -110,7 +110,7 @@ done:
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
xe_info_init_early(xe, desc, subplatform_desc);
- xe_info_init(xe, desc->graphics, desc->media);
+ xe_info_init(xe, desc);
return 0;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index 36a3b5420fef..b0254b014fe4 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -320,7 +320,7 @@ static void xe_rtp_process_to_sr_tests(struct kunit *test)
count_rtp_entries++;
xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries);
- xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+ xe_rtp_process_to_sr(&ctx, param->entries, count_rtp_entries, reg_sr);
xa_for_each(&reg_sr->xa, idx, sre) {
if (idx == param->expected_reg.addr)
diff --git a/drivers/gpu/drm/xe/tests/xe_test_mod.c b/drivers/gpu/drm/xe/tests/xe_test_mod.c
index 875f3e6f965e..93081bcf2ab0 100644
--- a/drivers/gpu/drm/xe/tests/xe_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_test_mod.c
@@ -7,4 +7,4 @@
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xe kunit tests");
-MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index e22bbf57fca7..68fe70ce2be3 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -10,11 +10,11 @@
#include <drm/drm_print.h>
-#include "xe_device_types.h"
+#include "xe_gt_types.h"
#include "xe_step.h"
/**
- * DOC: Xe ASSERTs
+ * DOC: Xe Asserts
*
* While Xe driver aims to be simpler than legacy i915 driver it is still
* complex enough that some changes introduced while adding new functionality
@@ -103,7 +103,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_assert(xe, condition) xe_assert_msg((xe), condition, "")
#define xe_assert_msg(xe, condition, msg, arg...) ({ \
@@ -138,7 +138,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_tile_assert(tile, condition) xe_tile_assert_msg((tile), condition, "")
#define xe_tile_assert_msg(tile, condition, msg, arg...) ({ \
@@ -162,7 +162,7 @@
* (&CONFIG_DRM_XE_DEBUG must be enabled) and cannot be used in expressions
* or as a condition.
*
- * See `Xe ASSERTs`_ for general usage guidelines.
+ * See `Xe Asserts`_ for general usage guidelines.
*/
#define xe_gt_assert(gt, condition) xe_gt_assert_msg((gt), condition, "")
#define xe_gt_assert_msg(gt, condition, msg, arg...) ({ \
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index ef777dbdf4ec..9570672fce33 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -41,7 +41,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
/*
* We need to allocate space for the requested number of dwords,
* one additional MI_BATCH_BUFFER_END dword, and additional buffer
- * space to accomodate the platform-specific hardware prefetch
+ * space to accommodate the platform-specific hardware prefetch
* requirements.
*/
bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index e5f51fd23c65..7aa2c17825da 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -6,15 +6,19 @@
#include "xe_bo.h"
#include <linux/dma-buf.h>
+#include <linux/nospec.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
+#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
+#include <kunit/static_stub.h>
+
#include "xe_device.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
@@ -24,7 +28,9 @@
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
+#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_shrinker.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -49,6 +55,8 @@ static struct ttm_placement sys_placement = {
.placement = &sys_placement_flags,
};
+static struct ttm_placement purge_placement;
+
static const struct ttm_place tt_placement_flags[] = {
{
.fpfn = 0,
@@ -124,6 +132,22 @@ bool xe_bo_is_stolen_devmem(struct xe_bo *bo)
GRAPHICS_VERx100(xe_bo_device(bo)) >= 1270;
}
+/**
+ * xe_bo_is_vm_bound - check if BO has any mappings through VM_BIND
+ * @bo: The BO
+ *
+ * Check if a given bo is bound through VM_BIND. This requires the
+ * reservation lock for the BO to be held.
+ *
+ * Returns: boolean
+ */
+bool xe_bo_is_vm_bound(struct xe_bo *bo)
+{
+ xe_bo_assert_held(bo);
+
+ return !list_empty(&bo->ttm.base.gpuva.list);
+}
+
static bool xe_bo_is_user(struct xe_bo *bo)
{
return bo->flags & XE_BO_FLAG_USER;
@@ -139,14 +163,17 @@ mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
return tile->migrate;
}
-static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
+static struct xe_vram_region *res_to_mem_region(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
struct ttm_resource_manager *mgr;
+ struct xe_ttm_vram_mgr *vram_mgr;
xe_assert(xe, resource_is_vram(res));
mgr = ttm_manager_type(&xe->ttm, res->mem_type);
- return to_xe_ttm_vram_mgr(mgr)->vram;
+ vram_mgr = to_xe_ttm_vram_mgr(mgr);
+
+ return container_of(vram_mgr, struct xe_vram_region, ttm);
}
static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
@@ -162,25 +189,39 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
}
}
+static bool force_contiguous(u32 bo_flags)
+{
+ if (bo_flags & XE_BO_FLAG_STOLEN)
+ return true; /* users expect this */
+ else if (bo_flags & XE_BO_FLAG_PINNED &&
+ !(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
+ return true; /* needs vmap */
+
+ /*
+ * For eviction / restore on suspend / resume, objects pinned in VRAM
+ * must be contiguous. Also, only contiguous BOs support xe_bo_vmap.
+ */
+ return bo_flags & XE_BO_FLAG_NEEDS_CPU_ACCESS &&
+ bo_flags & XE_BO_FLAG_PINNED;
+}
+
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
struct ttm_place place = { .mem_type = mem_type };
- struct xe_mem_region *vram;
+ struct ttm_resource_manager *mgr = ttm_manager_type(&xe->ttm, mem_type);
+ struct xe_ttm_vram_mgr *vram_mgr = to_xe_ttm_vram_mgr(mgr);
+
+ struct xe_vram_region *vram;
u64 io_size;
xe_assert(xe, *c < ARRAY_SIZE(bo->placements));
- vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
+ vram = container_of(vram_mgr, struct xe_vram_region, ttm);
xe_assert(xe, vram && vram->usable_size);
io_size = vram->io_size;
- /*
- * For eviction / restore on suspend / resume objects
- * pinned in VRAM must be contiguous
- */
- if (bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT))
+ if (force_contiguous(bo_flags))
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (io_size < vram->usable_size) {
@@ -212,8 +253,7 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
bo->placements[*c] = (struct ttm_place) {
.mem_type = XE_PL_STOLEN,
- .flags = bo_flags & (XE_BO_FLAG_PINNED |
- XE_BO_FLAG_GGTT) ?
+ .flags = force_contiguous(bo_flags) ?
TTM_PL_FLAG_CONTIGUOUS : 0,
};
*c += 1;
@@ -250,6 +290,10 @@ int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
static void xe_evict_flags(struct ttm_buffer_object *tbo,
struct ttm_placement *placement)
{
+ struct xe_device *xe = container_of(tbo->bdev, typeof(*xe), ttm);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
+ struct xe_bo *bo;
+
if (!xe_bo_is_xe_bo(tbo)) {
/* Don't handle scatter gather BOs */
if (tbo->type == ttm_bo_type_sg) {
@@ -257,10 +301,21 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
return;
}
+ *placement = device_unplugged ? purge_placement : sys_placement;
+ return;
+ }
+
+ bo = ttm_to_xe_bo(tbo);
+ if (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) {
*placement = sys_placement;
return;
}
+ if (device_unplugged && !tbo->base.dma_buf) {
+ *placement = purge_placement;
+ return;
+ }
+
/*
* For xe, sg bos that are evicted to system just triggers a
* rebind of the sg list upon subsequent validation to XE_PL_TT.
@@ -278,11 +333,15 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
}
}
+/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
struct ttm_tt ttm;
- struct device *dev;
+ /** @xe: The xe device */
+ struct xe_device *xe;
struct sg_table sgt;
struct sg_table *sg;
+ /** @purgeable: Whether the content of the pages of @ttm is purgeable. */
+ bool purgeable;
};
static int xe_tt_map_sg(struct ttm_tt *tt)
@@ -291,7 +350,8 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
unsigned long num_pages = tt->num_pages;
int ret;
- XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+ XE_WARN_ON((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE));
if (xe_tt->sg)
return 0;
@@ -299,13 +359,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
num_pages, 0,
(u64)num_pages << PAGE_SHIFT,
- xe_sg_segment_size(xe_tt->dev),
+ xe_sg_segment_size(xe_tt->xe->drm.dev),
GFP_KERNEL);
if (ret)
return ret;
xe_tt->sg = &xe_tt->sgt;
- ret = dma_map_sgtable(xe_tt->dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(xe_tt->sg);
@@ -321,7 +381,7 @@ static void xe_tt_unmap_sg(struct ttm_tt *tt)
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->dev, xe_tt->sg,
+ dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -336,21 +396,47 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
return xe_tt->sg;
}
+/*
+ * Account ttm pages against the device shrinker's shrinkable and
+ * purgeable counts.
+ */
+static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+ else
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+}
+
+static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+ else
+ xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+}
+
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
u32 page_flags)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = xe_bo_device(bo);
- struct xe_ttm_tt *tt;
+ struct xe_ttm_tt *xe_tt;
+ struct ttm_tt *tt;
unsigned long extra_pages;
enum ttm_caching caching = ttm_cached;
int err;
- tt = kzalloc(sizeof(*tt), GFP_KERNEL);
- if (!tt)
+ xe_tt = kzalloc(sizeof(*xe_tt), GFP_KERNEL);
+ if (!xe_tt)
return NULL;
- tt->dev = xe->drm.dev;
+ tt = &xe_tt->ttm;
+ xe_tt->xe = xe;
extra_pages = 0;
if (xe_bo_needs_ccs_pages(bo))
@@ -396,42 +482,66 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
caching = ttm_uncached;
}
- err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
+ if (ttm_bo->type != ttm_bo_type_sg)
+ page_flags |= TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE;
+
+ err = ttm_tt_init(tt, &bo->ttm, page_flags, caching, extra_pages);
if (err) {
- kfree(tt);
+ kfree(xe_tt);
return NULL;
}
- return &tt->ttm;
+ if (ttm_bo->type != ttm_bo_type_sg) {
+ err = ttm_tt_setup_backup(tt);
+ if (err) {
+ ttm_tt_fini(tt);
+ kfree(xe_tt);
+ return NULL;
+ }
+ }
+
+ return tt;
}
static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
int err;
/*
* dma-bufs are not populated with pages, and the dma-
* addresses are set up when moved to XE_PL_TT.
*/
- if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return 0;
- err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+ if (ttm_tt_is_backed_up(tt) && !xe_tt->purgeable) {
+ err = ttm_tt_restore(ttm_dev, tt, ctx);
+ } else {
+ ttm_tt_clear_backed_up(tt);
+ err = ttm_pool_alloc(&ttm_dev->pool, tt, ctx);
+ }
if (err)
return err;
- return err;
+ xe_tt->purgeable = false;
+ xe_ttm_tt_account_add(tt);
+
+ return 0;
}
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
- if (tt->page_flags & TTM_TT_FLAG_EXTERNAL)
+ if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return;
xe_tt_unmap_sg(tt);
- return ttm_pool_free(&ttm_dev->pool, tt);
+ ttm_pool_free(&ttm_dev->pool, tt);
+ xe_ttm_tt_account_subtract(tt);
}
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -440,6 +550,14 @@ static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
kfree(tt);
}
+static bool xe_ttm_resource_visible(struct ttm_resource *mem)
+{
+ struct xe_ttm_vram_mgr_resource *vres =
+ to_xe_ttm_vram_mgr_resource(mem);
+
+ return vres->used_visible_size == mem->size;
+}
+
static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
@@ -451,11 +569,9 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
return 0;
case XE_PL_VRAM0:
case XE_PL_VRAM1: {
- struct xe_ttm_vram_mgr_resource *vres =
- to_xe_ttm_vram_mgr_resource(mem);
- struct xe_mem_region *vram = res_to_mem_region(mem);
+ struct xe_vram_region *vram = res_to_mem_region(mem);
- if (vres->used_visible_size < mem->size)
+ if (!xe_ttm_resource_visible(mem))
return -EINVAL;
mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -468,7 +584,7 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.offset += vram->io_start;
mem->bus.is_iomem = true;
-#if !defined(CONFIG_X86)
+#if !IS_ENABLED(CONFIG_X86)
mem->bus.caching = ttm_write_combined;
#endif
return 0;
@@ -557,11 +673,20 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
ttm);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ bool device_unplugged = drm_dev_is_unplugged(&xe->drm);
struct sg_table *sg;
xe_assert(xe, attach);
xe_assert(xe, ttm_bo->ttm);
+ if (device_unplugged && new_res->mem_type == XE_PL_SYSTEM &&
+ ttm_bo->sg) {
+ dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ }
+
if (new_res->mem_type == XE_PL_SYSTEM)
goto out;
@@ -697,6 +822,20 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
+ if (!move_lacks_source && (bo->flags & XE_BO_FLAG_CPU_ADDR_MIRROR) &&
+ new_mem->mem_type == XE_PL_SYSTEM) {
+ ret = xe_svm_bo_evict(bo);
+ if (!ret) {
+ drm_dbg(&xe->drm, "Evict system allocator BO success\n");
+ ttm_bo_move_null(ttm_bo, new_mem);
+ } else {
+ drm_dbg(&xe->drm, "Evict system allocator BO failed=%pe\n",
+ ERR_PTR(ret));
+ }
+
+ goto out;
+ }
+
if (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT && !handle_system_ccs) {
ttm_bo_move_null(ttm_bo, new_mem);
goto out;
@@ -722,7 +861,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
new_mem->mem_type == XE_PL_SYSTEM) {
long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP,
- true,
+ false,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
ret = timeout;
@@ -761,7 +900,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
if (xe_rpm_reclaim_safe(xe)) {
/*
* We might be called through swapout in the validation path of
- * another TTM device, so unconditionally acquire rpm here.
+ * another TTM device, so acquire rpm here.
*/
xe_pm_runtime_get(xe);
} else {
@@ -769,94 +908,265 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_pm_runtime_get_noresume(xe);
}
- if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
- /*
- * Kernel memory that is pinned should only be moved on suspend
- * / resume, some of the pinned memory is required for the
- * device to resume / use the GPU to move other evicted memory
- * (user memory) around. This likely could be optimized a bit
- * futher where we find the minimum set of pinned memory
- * required for resume but for simplity doing a memcpy for all
- * pinned memory.
- */
- ret = xe_bo_vmap(bo);
- if (!ret) {
- ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
+ if (move_lacks_source) {
+ u32 flags = 0;
- /* Create a new VMAP once kernel BO back in VRAM */
- if (!ret && resource_is_vram(new_mem)) {
- struct xe_mem_region *vram = res_to_mem_region(new_mem);
- void __iomem *new_addr = vram->mapping +
- (new_mem->start << PAGE_SHIFT);
+ if (mem_type_is_vram(new_mem->mem_type))
+ flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
+ else if (handle_system_ccs)
+ flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
- if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
- ret = -EINVAL;
- xe_pm_runtime_put(xe);
- goto out;
- }
-
- xe_assert(xe, new_mem->start ==
- bo->placements->fpfn);
-
- iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
- }
- }
+ fence = xe_migrate_clear(migrate, bo, new_mem, flags);
} else {
- if (move_lacks_source) {
- u32 flags = 0;
-
- if (mem_type_is_vram(new_mem->mem_type))
- flags |= XE_MIGRATE_CLEAR_FLAG_FULL;
- else if (handle_system_ccs)
- flags |= XE_MIGRATE_CLEAR_FLAG_CCS_DATA;
-
- fence = xe_migrate_clear(migrate, bo, new_mem, flags);
- }
- else
- fence = xe_migrate_copy(migrate, bo, bo, old_mem,
- new_mem, handle_system_ccs);
- if (IS_ERR(fence)) {
- ret = PTR_ERR(fence);
- xe_pm_runtime_put(xe);
- goto out;
- }
- if (!move_lacks_source) {
- ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
- true, new_mem);
- if (ret) {
- dma_fence_wait(fence, false);
- ttm_bo_move_null(ttm_bo, new_mem);
- ret = 0;
- }
- } else {
- /*
- * ttm_bo_move_accel_cleanup() may blow up if
- * bo->resource == NULL, so just attach the
- * fence and set the new resource.
- */
- dma_resv_add_fence(ttm_bo->base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
+ fence = xe_migrate_copy(migrate, bo, bo, old_mem, new_mem,
+ handle_system_ccs);
+ }
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ xe_pm_runtime_put(xe);
+ goto out;
+ }
+ if (!move_lacks_source) {
+ ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
+ new_mem);
+ if (ret) {
+ dma_fence_wait(fence, false);
ttm_bo_move_null(ttm_bo, new_mem);
+ ret = 0;
}
-
- dma_fence_put(fence);
+ } else {
+ /*
+ * ttm_bo_move_accel_cleanup() may blow up if
+ * bo->resource == NULL, so just attach the
+ * fence and set the new resource.
+ */
+ dma_resv_add_fence(ttm_bo->base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ ttm_bo_move_null(ttm_bo, new_mem);
}
+ dma_fence_put(fence);
xe_pm_runtime_put(xe);
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
- ttm_bo->ttm)
+ ttm_bo->ttm) {
+ long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+ DMA_RESV_USAGE_KERNEL,
+ false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0)
+ ret = timeout;
+
xe_tt_unmap_sg(ttm_bo->ttm);
+ }
+
+ return ret;
+}
+
+static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
+ struct ttm_buffer_object *bo,
+ unsigned long *scanned)
+{
+ long lret;
+
+ /* Fake move to system, without copying data. */
+ if (bo->resource->mem_type != XE_PL_SYSTEM) {
+ struct ttm_resource *new_resource;
+
+ lret = ttm_bo_wait_ctx(bo, ctx);
+ if (lret)
+ return lret;
+
+ lret = ttm_bo_mem_space(bo, &sys_placement, &new_resource, ctx);
+ if (lret)
+ return lret;
+
+ xe_tt_unmap_sg(bo->ttm);
+ ttm_bo_move_null(bo, new_resource);
+ }
+
+ *scanned += bo->ttm->num_pages;
+ lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+ {.purge = true,
+ .writeback = false,
+ .allow_move = false});
+
+ if (lret > 0)
+ xe_ttm_tt_account_subtract(bo->ttm);
+
+ return lret;
+}
+
+static bool
+xe_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place)
+{
+ struct drm_gpuvm_bo *vm_bo;
+
+ if (!ttm_bo_eviction_valuable(bo, place))
+ return false;
+
+ if (!xe_bo_is_xe_bo(bo))
+ return true;
+
+ drm_gem_for_each_gpuvm_bo(vm_bo, &bo->base) {
+ if (xe_vm_is_validating(gpuvm_to_vm(vm_bo->vm)))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * xe_bo_shrink() - Try to shrink an xe bo.
+ * @ctx: The struct ttm_operation_ctx used for shrinking.
+ * @bo: The TTM buffer object whose pages to shrink.
+ * @flags: Flags governing the shrink behaviour.
+ * @scanned: Pointer to a counter of the number of pages
+ * attempted to shrink.
+ *
+ * Try to shrink or purge a bo, and if that succeeds, unmap its DMA mappings.
+ * Note that we also need to be able to handle non-xe bos
+ * (ghost bos), but only if the struct ttm_tt is embedded in
+ * a struct xe_ttm_tt. When the function attempts to shrink
+ * the pages of a buffer object, the value pointed to by @scanned
+ * is updated.
+ *
+ * Return: The number of pages shrunken or purged, or negative error
+ * code on failure.
+ */
+long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long *scanned)
+{
+ struct ttm_tt *tt = bo->ttm;
+ struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
+ struct ttm_place place = {.mem_type = bo->resource->mem_type};
+ struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
+ struct xe_device *xe = xe_tt->xe;
+ bool needs_rpm;
+ long lret = 0L;
+
+ if (!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE) ||
+ (flags.purge && !xe_tt->purgeable))
+ return -EBUSY;
+
+ if (!xe_bo_eviction_valuable(bo, &place))
+ return -EBUSY;
+
+ if (!xe_bo_is_xe_bo(bo) || !xe_bo_get_unless_zero(xe_bo))
+ return xe_bo_shrink_purge(ctx, bo, scanned);
+
+ if (xe_tt->purgeable) {
+ if (bo->resource->mem_type != XE_PL_SYSTEM)
+ lret = xe_bo_move_notify(xe_bo, ctx);
+ if (!lret)
+ lret = xe_bo_shrink_purge(ctx, bo, scanned);
+ goto out_unref;
+ }
+
+ /* System CCS needs gpu copy when moving PL_TT -> PL_SYSTEM */
+ needs_rpm = (!IS_DGFX(xe) && bo->resource->mem_type != XE_PL_SYSTEM &&
+ xe_bo_needs_ccs_pages(xe_bo));
+ if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+ goto out_unref;
+
+ *scanned += tt->num_pages;
+ lret = ttm_bo_shrink(ctx, bo, (struct ttm_bo_shrink_flags)
+ {.purge = false,
+ .writeback = flags.writeback,
+ .allow_move = true});
+ if (needs_rpm)
+ xe_pm_runtime_put(xe);
+
+ if (lret > 0)
+ xe_ttm_tt_account_subtract(tt);
+
+out_unref:
+ xe_bo_put(xe_bo);
+
+ return lret;
+}
+
+/**
+ * xe_bo_notifier_prepare_pinned() - Prepare a pinned VRAM object to be backed
+ * up in system memory.
+ * @bo: The buffer object to prepare.
+ *
+ * On successful completion, the object backup pages are allocated. This is
+ * expected to be called from the PM notifier, prior to suspend/hibernation.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup;
+ int ret = 0;
+ xe_bo_lock(bo, false);
+
+ xe_assert(xe, !bo->backup_obj);
+
+ /*
+ * Since this is called from the PM notifier we might have raced with
+ * someone unpinning this after we dropped the pinned list lock and
+ * before we grabbed the bo lock above.
+ */
+ if (!xe_bo_is_pinned(bo))
+ goto out_unlock_bo;
+
+ if (!xe_bo_is_vram(bo))
+ goto out_unlock_bo;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
+
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
+ }
+
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
+
+out_unlock_bo:
+ xe_bo_unlock(bo);
return ret;
}
/**
+ * xe_bo_notifier_unprepare_pinned() - Undo the previous prepare operation.
+ * @bo: The buffer object to undo the prepare for.
+ *
+ * Always returns 0. The backup object is removed, if still present. This is
+ * expected to be called from the PM notifier when undoing the prepare step.
+ *
+ * Return: Always returns 0.
+ */
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, false);
+ if (bo->backup_obj) {
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
+ }
+ xe_bo_unlock(bo);
+
+ return 0;
+}
+
+/**
* xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
* @bo: The buffer object to move.
*
- * On successful completion, the object memory will be moved to sytem memory.
+ * On successful completion, the object memory will be moved to system memory.
*
* This is needed to for special handling of pinned VRAM object during
* suspend-resume.
@@ -865,58 +1175,99 @@ out:
*/
int xe_bo_evict_pinned(struct xe_bo *bo)
{
- struct ttm_place place = {
- .mem_type = XE_PL_TT,
- };
- struct ttm_placement placement = {
- .placement = &place,
- .num_placement = 1,
- };
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- };
- struct ttm_resource *new_mem;
- int ret;
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ bool unmap = false;
+ int ret = 0;
- xe_bo_assert_held(bo);
+ xe_bo_lock(bo, false);
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ goto out_unlock_bo;
+ }
- if (WARN_ON(!xe_bo_is_vram(bo)))
- return -EINVAL;
+ if (!xe_bo_is_vram(bo))
+ goto out_unlock_bo;
- ret = ttm_bo_mem_space(&bo->ttm, &placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ goto out_unlock_bo;
- if (!bo->ttm.ttm) {
- bo->ttm.ttm = xe_ttm_tt_create(&bo->ttm, 0);
- if (!bo->ttm.ttm) {
- ret = -ENOMEM;
- goto err_res_free;
+ if (!backup) {
+ backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size,
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED);
+ if (IS_ERR(backup)) {
+ ret = PTR_ERR(backup);
+ goto out_unlock_bo;
}
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
}
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
- if (ret)
- goto err_res_free;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
- return 0;
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_backup;
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+ fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
+ backup->ttm.resource, false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_backup;
+ }
+
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_backup;
+
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
+
+ xe_map_memcpy_from(xe, backup->vmap.vaddr, &bo->vmap, 0,
+ bo->size);
+ }
+
+ if (!bo->backup_obj)
+ bo->backup_obj = backup;
+
+out_backup:
+ xe_bo_vunmap(backup);
+ if (ret && backup_created)
+ xe_bo_put(backup);
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
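Taken together, the pinned-BO helpers above split suspend handling into a prepare step (allocate the system-memory backup from the PM notifier, before the suspend path proper), the actual eviction (copy the VRAM contents into the backup), and the mirror-image restore and unprepare on the way back. A hedged sketch of the expected ordering for a single bo; the real driver walks its pinned-bo lists from the PM code, and the wrapper below is illustrative only:

static int pinned_bo_suspend_resume_cycle(struct xe_bo *bo)
{
	int ret;

	/* PM notifier, before suspend: allocate the system-memory backup. */
	ret = xe_bo_notifier_prepare_pinned(bo);
	if (ret)
		return ret;

	/* Suspend: copy or migrate the VRAM contents into the backup. */
	ret = xe_bo_evict_pinned(bo);
	if (ret)
		goto out_unprepare;

	/* Resume: copy the backup back into VRAM and drop it. */
	ret = xe_bo_restore_pinned(bo);

out_unprepare:
	/* PM notifier teardown: drops the backup if it is still present. */
	xe_bo_notifier_unprepare_pinned(bo);
	return ret;
}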
@@ -935,50 +1286,117 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
+ .gfp_retry_mayfail = false,
};
- struct ttm_resource *new_mem;
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_bo *backup = bo->backup_obj;
+ bool unmap = false;
int ret;
- xe_bo_assert_held(bo);
+ if (!backup)
+ return 0;
- if (WARN_ON(!bo->ttm.resource))
- return -EINVAL;
+ xe_bo_lock(bo, false);
- if (WARN_ON(!xe_bo_is_pinned(bo)))
- return -EINVAL;
+ if (!xe_bo_is_pinned(backup)) {
+ ret = ttm_bo_validate(&backup->ttm, &backup->placement, &ctx);
+ if (ret)
+ goto out_unlock_bo;
+ }
- if (WARN_ON(xe_bo_is_vram(bo) || !bo->ttm.ttm))
- return -EINVAL;
+ if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
+ struct xe_migrate *migrate;
+ struct dma_fence *fence;
- ret = ttm_bo_mem_space(&bo->ttm, &bo->placement, &new_mem, &ctx);
- if (ret)
- return ret;
+ if (bo->tile)
+ migrate = bo->tile->migrate;
+ else
+ migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
- ret = ttm_tt_populate(bo->ttm.bdev, bo->ttm.ttm, &ctx);
- if (ret)
- goto err_res_free;
+ ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
- if (ret)
- goto err_res_free;
+ ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
+ if (ret)
+ goto out_unlock_bo;
- ret = xe_bo_move(&bo->ttm, false, &ctx, new_mem, NULL);
- if (ret)
- goto err_res_free;
+ fence = xe_migrate_copy(migrate, backup, bo,
+ backup->ttm.resource, bo->ttm.resource,
+ false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto out_unlock_bo;
+ }
- return 0;
+ dma_resv_add_fence(bo->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(backup->ttm.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+ } else {
+ ret = xe_bo_vmap(backup);
+ if (ret)
+ goto out_unlock_bo;
+
+ if (iosys_map_is_null(&bo->vmap)) {
+ ret = xe_bo_vmap(bo);
+ if (ret)
+ goto out_backup;
+ unmap = true;
+ }
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr,
+ bo->size);
+ }
-err_res_free:
- ttm_resource_free(&bo->ttm, &new_mem);
+ bo->backup_obj = NULL;
+
+out_backup:
+ xe_bo_vunmap(backup);
+ if (!bo->backup_obj) {
+ if (xe_bo_is_pinned(backup))
+ ttm_bo_unpin(&backup->ttm);
+ xe_bo_put(backup);
+ }
+out_unlock_bo:
+ if (unmap)
+ xe_bo_vunmap(bo);
+ xe_bo_unlock(bo);
return ret;
}
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
+{
+ struct ttm_buffer_object *ttm_bo = &bo->ttm;
+ struct ttm_tt *tt = ttm_bo->ttm;
+
+ if (tt) {
+ struct xe_ttm_tt *xe_tt = container_of(tt, typeof(*xe_tt), ttm);
+
+ if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
+ dma_buf_unmap_attachment(ttm_bo->base.import_attach,
+ ttm_bo->sg,
+ DMA_BIDIRECTIONAL);
+ ttm_bo->sg = NULL;
+ xe_tt->sg = NULL;
+ } else if (xe_tt->sg) {
+ dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ DMA_BIDIRECTIONAL, 0);
+ sg_free_table(xe_tt->sg);
+ xe_tt->sg = NULL;
+ }
+ }
+
+ return 0;
+}
+
static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
unsigned long page_offset)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_res_cursor cursor;
- struct xe_mem_region *vram;
+ struct xe_vram_region *vram;
if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
@@ -1082,6 +1500,87 @@ static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
}
}
+static void xe_ttm_bo_purge(struct ttm_buffer_object *ttm_bo, struct ttm_operation_ctx *ctx)
+{
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+
+ if (ttm_bo->ttm) {
+ struct ttm_placement place = {};
+ int ret = ttm_bo_validate(ttm_bo, &place, ctx);
+
+ drm_WARN_ON(&xe->drm, ret);
+ }
+}
+
+static void xe_ttm_bo_swap_notify(struct ttm_buffer_object *ttm_bo)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .gfp_retry_mayfail = false,
+ };
+
+ if (ttm_bo->ttm) {
+ struct xe_ttm_tt *xe_tt =
+ container_of(ttm_bo->ttm, struct xe_ttm_tt, ttm);
+
+ if (xe_tt->purgeable)
+ xe_ttm_bo_purge(ttm_bo, &ctx);
+ }
+}
+
+static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct iosys_map vmap;
+ struct xe_res_cursor cursor;
+ struct xe_vram_region *vram;
+ int bytes_left = len;
+ int err = 0;
+
+ xe_bo_assert_held(bo);
+ xe_device_assert_mem_access(xe);
+
+ if (!mem_type_is_vram(ttm_bo->resource->mem_type))
+ return -EIO;
+
+ if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
+ struct xe_migrate *migrate =
+ mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
+
+ err = xe_migrate_access_memory(migrate, bo, offset, buf, len,
+ write);
+ goto out;
+ }
+
+ vram = res_to_mem_region(ttm_bo->resource);
+ xe_res_first(ttm_bo->resource, offset & PAGE_MASK,
+ bo->size - (offset & PAGE_MASK), &cursor);
+
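+ /*
+ * Copy at most one page per iteration; the resource cursor walks the
+ * possibly non-contiguous VRAM blocks backing the bo, so the iomem
+ * address is recomputed from cursor.start each time around.
+ */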
+ do {
+ unsigned long page_offset = (offset & ~PAGE_MASK);
+ int byte_count = min((int)(PAGE_SIZE - page_offset), bytes_left);
+
+ iosys_map_set_vaddr_iomem(&vmap, (u8 __iomem *)vram->mapping +
+ cursor.start);
+ if (write)
+ xe_map_memcpy_to(xe, &vmap, page_offset, buf, byte_count);
+ else
+ xe_map_memcpy_from(xe, buf, &vmap, page_offset, byte_count);
+
+ buf += byte_count;
+ offset += byte_count;
+ bytes_left -= byte_count;
+ if (bytes_left)
+ xe_res_next(&cursor, PAGE_SIZE);
+ } while (bytes_left);
+
+out:
+ return err ?: len;
+}
+
const struct ttm_device_funcs xe_ttm_funcs = {
.ttm_tt_create = xe_ttm_tt_create,
.ttm_tt_populate = xe_ttm_tt_populate,
@@ -1091,15 +1590,19 @@ const struct ttm_device_funcs xe_ttm_funcs = {
.move = xe_bo_move,
.io_mem_reserve = xe_ttm_io_mem_reserve,
.io_mem_pfn = xe_ttm_io_mem_pfn,
+ .access_memory = xe_ttm_access_memory,
.release_notify = xe_ttm_bo_release_notify,
- .eviction_valuable = ttm_bo_eviction_valuable,
+ .eviction_valuable = xe_bo_eviction_valuable,
.delete_mem_notify = xe_ttm_bo_delete_mem_notify,
+ .swap_notify = xe_ttm_bo_swap_notify,
};
static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
{
struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
+ struct xe_tile *tile;
+ u8 id;
if (bo->ttm.base.import_attach)
drm_prime_gem_destroy(&bo->ttm.base, NULL);
@@ -1107,8 +1610,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));
- if (bo->ggtt_node && bo->ggtt_node->base.size)
- xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo);
+ for_each_tile(tile, xe, id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size)
+ xe_ggtt_remove_bo(tile->mem.ggtt, bo);
#ifdef CONFIG_PROC_FS
if (bo->client)
@@ -1118,6 +1622,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
if (bo->vm && xe_bo_is_user(bo))
xe_vm_put(bo->vm);
+ if (bo->parent_obj)
+ xe_bo_put(bo->parent_obj);
+
mutex_lock(&xe->mem_access.vram_userfault.lock);
if (!list_empty(&bo->vram_userfault_link))
list_del(&bo->vram_userfault_link);
@@ -1206,11 +1713,50 @@ out:
return ret;
}
+static int xe_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ struct ttm_buffer_object *ttm_bo = vma->vm_private_data;
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = ttm_bo_vm_access(vma, addr, buf, len, write);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+/**
+ * xe_bo_read() - Read from an xe_bo
+ * @bo: The buffer object to read from.
+ * @offset: The byte offset to start reading from.
+ * @dst: Location to store the read.
+ * @size: Size in bytes for the read.
+ *
+ * Read @size bytes from the @bo, starting from @offset, storing into @dst.
+ *
+ * Return: Zero on success, or negative error.
+ */
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
+{
+ int ret;
+
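+ /* ttm_bo_access() returns the number of bytes accessed, or a negative error. */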
+ ret = ttm_bo_access(&bo->ttm, offset, dst, size, 0);
+ if (ret >= 0 && ret != size)
+ ret = -EIO;
+ else if (ret == size)
+ ret = 0;
+
+ return ret;
+}
+
static const struct vm_operations_struct xe_gem_vm_ops = {
.fault = xe_gem_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
+ .access = xe_bo_vm_access,
};
static const struct drm_gem_object_funcs xe_gem_object_funcs = {
@@ -1224,7 +1770,7 @@ static const struct drm_gem_object_funcs xe_gem_object_funcs = {
/**
* xe_bo_alloc - Allocate storage for a struct xe_bo
*
- * This funcition is intended to allocate storage to be used for input
+ * This function is intended to allocate storage to be used for input
* to __xe_bo_create_locked(), in the case a pointer to the bo to be
* created is needed before the call to __xe_bo_create_locked().
* If __xe_bo_create_locked ends up never to be called, then the
@@ -1264,6 +1810,7 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement *placement;
uint32_t alignment;
@@ -1278,6 +1825,10 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
return ERR_PTR(-EINVAL);
}
+	/* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT to also be set */
+ if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT))
+ return ERR_PTR(-EINVAL);
+
if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) &&
!(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) &&
((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) ||
@@ -1424,7 +1975,8 @@ static struct xe_bo *
__xe_bo_create_locked(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- u16 cpu_caching, enum ttm_bo_type type, u32 flags)
+ u16 cpu_caching, enum ttm_bo_type type, u32 flags,
+ u64 alignment)
{
struct xe_bo *bo = NULL;
int err;
@@ -1453,6 +2005,8 @@ __xe_bo_create_locked(struct xe_device *xe,
if (IS_ERR(bo))
return bo;
+ bo->min_align = alignment;
+
/*
	 * Note that instead of taking a reference on the drm_gpuvm_resv_bo(),
* to ensure the shared resv doesn't disappear under the bo, the bo
@@ -1465,21 +2019,32 @@ __xe_bo_create_locked(struct xe_device *xe,
bo->vm = vm;
if (bo->flags & XE_BO_FLAG_GGTT) {
- if (!tile && flags & XE_BO_FLAG_STOLEN)
- tile = xe_device_get_root_tile(xe);
+ struct xe_tile *t;
+ u8 id;
- xe_assert(xe, tile);
+ if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) {
+ if (!tile && flags & XE_BO_FLAG_STOLEN)
+ tile = xe_device_get_root_tile(xe);
- if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
- err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo,
- start + bo->size, U64_MAX);
- } else {
- err = xe_ggtt_insert_bo(tile->mem.ggtt, bo);
+ xe_assert(xe, tile);
+ }
+
+ for_each_tile(t, xe, id) {
+ if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t)))
+ continue;
+
+ if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
+ err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
+ start + bo->size, U64_MAX);
+ } else {
+ err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+ }
+ if (err)
+ goto err_unlock_put_bo;
}
- if (err)
- goto err_unlock_put_bo;
}
+ trace_xe_bo_create(bo);
return bo;
err_unlock_put_bo:
@@ -1493,16 +2058,18 @@ struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags, u64 alignment)
{
- return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags);
+ return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+ flags, alignment);
}
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags)
{
- return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags);
+ return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
+ flags, 0);
}
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
@@ -1512,7 +2079,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
{
struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
cpu_caching, ttm_bo_type_device,
- flags | XE_BO_FLAG_USER);
+ flags | XE_BO_FLAG_USER, 0);
if (!IS_ERR(bo))
xe_bo_unlock_vm_held(bo);
@@ -1536,6 +2103,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
size_t size, u64 offset,
enum ttm_bo_type type, u32 flags)
{
+ return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
+ type, flags, 0);
+}
+
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment)
+{
struct xe_bo *bo;
int err;
u64 start = offset == ~0ull ? 0 : offset;
@@ -1546,7 +2124,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile
flags |= XE_BO_FLAG_GGTT;
bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
+ alignment);
if (IS_ERR(bo))
return bo;
@@ -1603,6 +2182,8 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
struct xe_bo *bo;
int ret;
+ KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
+
bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
if (IS_ERR(bo))
return bo;
@@ -1645,7 +2226,8 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
struct xe_bo *bo;
u32 dst_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT;
- dst_flags |= (*src)->flags & XE_BO_FLAG_GGTT_INVALIDATE;
+ dst_flags |= (*src)->flags & (XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
xe_assert(xe, IS_DGFX(xe));
xe_assert(xe, !(*src)->vmap.is_iomem);
@@ -1669,10 +2251,16 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
{
struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
- if (res->mem_type == XE_PL_STOLEN)
+ switch (res->mem_type) {
+ case XE_PL_STOLEN:
return xe_ttm_stolen_gpu_offset(xe);
-
- return res_to_mem_region(res)->dpa_base;
+ case XE_PL_TT:
+ case XE_PL_SYSTEM:
+ return 0;
+ default:
+ return res_to_mem_region(res)->dpa_base;
+ }
+ return 0;
}
/**
@@ -1698,15 +2286,14 @@ int xe_bo_pin_external(struct xe_bo *bo)
if (err)
return err;
- if (xe_bo_is_vram(bo)) {
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- }
+ spin_lock(&xe->pinned.lock);
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.external);
+ spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_subtract(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -1719,6 +2306,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
int xe_bo_pin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
int err;
@@ -1742,29 +2330,18 @@ int xe_bo_pin(struct xe_bo *bo)
if (err)
return err;
- /*
- * For pinned objects in on DGFX, which are also in vram, we expect
- * these to be in contiguous VRAM memory. Required eviction / restore
- * during suspend / resume (force restore to same physical address).
- */
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
-
- if (mem_type_is_vram(place->mem_type)) {
- xe_assert(xe, place->flags & TTM_PL_FLAG_CONTIGUOUS);
-
- place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
- vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
- place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
-
- spin_lock(&xe->pinned.lock);
- list_add_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
- }
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ if (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)
+ list_add_tail(&bo->pinned_link, &xe->pinned.late.kernel_bo_present);
+ else
+ list_add_tail(&bo->pinned_link, &xe->pinned.early.kernel_bo_present);
+ spin_unlock(&xe->pinned.lock);
}
ttm_bo_pin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_subtract(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -1799,6 +2376,8 @@ void xe_bo_unpin_external(struct xe_bo *bo)
spin_unlock(&xe->pinned.lock);
ttm_bo_unpin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_add(bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -1809,24 +2388,28 @@ void xe_bo_unpin_external(struct xe_bo *bo)
void xe_bo_unpin(struct xe_bo *bo)
{
+ struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
xe_assert(xe, !bo->ttm.base.import_attach);
xe_assert(xe, xe_bo_is_pinned(bo));
- if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
- bo->flags & XE_BO_FLAG_INTERNAL_TEST)) {
- struct ttm_place *place = &(bo->placements[0]);
+ if (mem_type_is_vram(place->mem_type) || bo->flags & XE_BO_FLAG_GGTT) {
+ spin_lock(&xe->pinned.lock);
+ xe_assert(xe, !list_empty(&bo->pinned_link));
+ list_del_init(&bo->pinned_link);
+ spin_unlock(&xe->pinned.lock);
- if (mem_type_is_vram(place->mem_type)) {
- spin_lock(&xe->pinned.lock);
- xe_assert(xe, !list_empty(&bo->pinned_link));
- list_del_init(&bo->pinned_link);
- spin_unlock(&xe->pinned.lock);
+ if (bo->backup_obj) {
+ if (xe_bo_is_pinned(bo->backup_obj))
+ ttm_bo_unpin(&bo->backup_obj->ttm);
+ xe_bo_put(bo->backup_obj);
+ bo->backup_obj = NULL;
}
}
-
ttm_bo_unpin(&bo->ttm);
+ if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
+ xe_ttm_tt_account_add(bo->ttm.ttm);
}
/**
@@ -1850,7 +2433,10 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
+ struct pin_cookie cookie;
+ int ret;
if (vm) {
lockdep_assert_held(&vm->lock);
@@ -1860,7 +2446,12 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
ctx.resv = xe_vm_resv(vm);
}
- return ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
+ cookie = xe_vm_set_validating(vm, allow_res_evict);
+ trace_xe_bo_validate(bo);
+ ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
+ xe_vm_clear_validating(vm, allow_res_evict, cookie);
+
+ return ret;
}
bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo)
@@ -1911,13 +2502,15 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
int xe_bo_vmap(struct xe_bo *bo)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
void *virtual;
bool is_iomem;
int ret;
xe_bo_assert_held(bo);
- if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
+ if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
+ !force_contiguous(bo->flags)))
return -EINVAL;
if (!iosys_map_is_null(&bo->vmap))
@@ -1957,6 +2550,93 @@ void xe_bo_vunmap(struct xe_bo *bo)
__xe_bo_vunmap(bo);
}
+static int gem_create_set_pxp_type(struct xe_device *xe, struct xe_bo *bo, u64 value)
+{
+ if (value == DRM_XE_PXP_TYPE_NONE)
+ return 0;
+
+ /* we only support DRM_XE_PXP_TYPE_HWDRM for now */
+ if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
+ return -EINVAL;
+
+ return xe_pxp_key_assign(xe->pxp, bo);
+}
+
+typedef int (*xe_gem_create_set_property_fn)(struct xe_device *xe,
+ struct xe_bo *bo,
+ u64 value);
+
+static const xe_gem_create_set_property_fn gem_create_set_property_funcs[] = {
+ [DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE] = gem_create_set_pxp_type,
+};
+
+static int gem_create_user_ext_set_property(struct xe_device *xe,
+ struct xe_bo *bo,
+ u64 extension)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.property >=
+ ARRAY_SIZE(gem_create_set_property_funcs)) ||
+ XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.property != DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(gem_create_set_property_funcs));
+ if (!gem_create_set_property_funcs[idx])
+ return -EINVAL;
+
+ return gem_create_set_property_funcs[idx](xe, bo, ext.value);
+}
+
+typedef int (*xe_gem_create_user_extension_fn)(struct xe_device *xe,
+ struct xe_bo *bo,
+ u64 extension);
+
+static const xe_gem_create_user_extension_fn gem_create_user_extension_funcs[] = {
+ [DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY] = gem_create_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 16
+static int gem_create_user_extensions(struct xe_device *xe, struct xe_bo *bo,
+ u64 extensions, int ext_number)
+{
+ u64 __user *address = u64_to_user_ptr(extensions);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(gem_create_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name,
+ ARRAY_SIZE(gem_create_user_extension_funcs));
+ err = gem_create_user_extension_funcs[idx](xe, bo, extensions);
+ if (XE_IOCTL_DBG(xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return gem_create_user_extensions(xe, bo, ext.next_extension,
+ ++ext_number);
+
+ return 0;
+}
+
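A rough userspace-side sketch of the extension chain this parser walks, assuming an open xe render node in fd; the size, placement and caching values are purely illustrative and error handling is omitted:

struct drm_xe_ext_set_property ext = {
	.base.name = DRM_XE_GEM_CREATE_EXTENSION_SET_PROPERTY,
	.base.next_extension = 0,		/* end of a single-entry chain */
	.property = DRM_XE_GEM_CREATE_SET_PROPERTY_PXP_TYPE,
	.value = DRM_XE_PXP_TYPE_HWDRM,
};
struct drm_xe_gem_create create = {
	.size = 64 * 1024,
	.placement = 1 << 0,			/* illustrative placement bit */
	.cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
	.extensions = (__u64)(uintptr_t)&ext,
};

ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);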
int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -1964,13 +2644,13 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct xe_file *xef = to_xe_file(file);
struct drm_xe_gem_create *args = data;
struct xe_vm *vm = NULL;
+ ktime_t end = 0;
struct xe_bo *bo;
unsigned int bo_flags;
u32 handle;
int err;
- if (XE_IOCTL_DBG(xe, args->extensions) ||
- XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
+ if (XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
@@ -2036,6 +2716,10 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
vm = xe_vm_lookup(xef, args->vm_id);
if (XE_IOCTL_DBG(xe, !vm))
return -ENOENT;
+ }
+
+retry:
+ if (vm) {
err = xe_vm_lock(vm, true);
if (err)
goto out_vm;
@@ -2049,9 +2733,17 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry;
goto out_vm;
}
+ if (args->extensions) {
+ err = gem_create_user_extensions(xe, bo, args->extensions, 0);
+ if (err)
+ goto out_bulk;
+ }
+
err = drm_gem_handle_create(file, &bo->ttm.base, &handle);
if (err)
goto out_bulk;
@@ -2085,9 +2777,26 @@ int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->flags))
+ if (XE_IOCTL_DBG(xe, args->flags &
+ ~DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER))
return -EINVAL;
+ if (args->flags & DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER) {
+ if (XE_IOCTL_DBG(xe, !IS_DGFX(xe)))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, args->handle))
+ return -EINVAL;
+
+ if (XE_IOCTL_DBG(xe, PAGE_SIZE > SZ_4K))
+ return -EINVAL;
+
+ BUILD_BUG_ON(((XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT) +
+ SZ_4K) >= DRM_FILE_PAGE_OFFSET_START);
+ args->offset = XE_PCI_BARRIER_MMAP_OFFSET;
+ return 0;
+ }
+
gem_obj = drm_gem_object_lookup(file, args->handle);
if (XE_IOCTL_DBG(xe, !gem_obj))
return -ENOENT;
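A rough userspace-side sketch of the new flag, assuming an open xe render node in fd:

struct drm_xe_gem_mmap_offset mmo = {
	.handle = 0,	/* must be zero when asking for the PCI barrier */
	.flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
};
void *barrier = MAP_FAILED;

if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo) == 0)
	barrier = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, mmo.offset);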
@@ -2193,6 +2902,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
struct ttm_place requested;
@@ -2230,19 +2940,18 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
- * @force_alloc: Set force_alloc in ttm_operation_ctx
*
* On successful completion, the object memory will be moved to evict
- * placement. Ths function blocks until the object has been fully moved.
+ * placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc)
+int xe_bo_evict(struct xe_bo *bo)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
- .force_alloc = force_alloc,
+ .gfp_retry_mayfail = true,
};
struct ttm_placement placement;
int ret;
@@ -2320,16 +3029,45 @@ void xe_bo_put_commit(struct llist_head *deferred)
drm_gem_object_free(&bo->ttm.base.refcount);
}
+static void xe_bo_dev_work_func(struct work_struct *work)
+{
+ struct xe_bo_dev *bo_dev = container_of(work, typeof(*bo_dev), async_free);
+
+ xe_bo_put_commit(&bo_dev->async_list);
+}
+
+/**
+ * xe_bo_dev_init() - Initialize BO dev to manage async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_init(struct xe_bo_dev *bo_dev)
+{
+ INIT_WORK(&bo_dev->async_free, xe_bo_dev_work_func);
+}
+
+/**
+ * xe_bo_dev_fini() - Finalize BO dev managing async BO freeing
+ * @bo_dev: The BO dev structure
+ */
+void xe_bo_dev_fini(struct xe_bo_dev *bo_dev)
+{
+ flush_work(&bo_dev->async_free);
+}
+
void xe_bo_put(struct xe_bo *bo)
{
+ struct xe_tile *tile;
+ u8 id;
+
might_sleep();
if (bo) {
#ifdef CONFIG_PROC_FS
if (bo->client)
might_lock(&bo->client->bos_lock);
#endif
- if (bo->ggtt_node && bo->ggtt_node->ggtt)
- might_lock(&bo->ggtt_node->ggtt->lock);
+ for_each_tile(tile, xe_bo_device(bo), id)
+ if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
+ might_lock(&bo->ggtt_node[id]->ggtt->lock);
drm_gem_object_put(&bo->ttm.base);
}
}
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 6e4be52306df..02ada1fb8a23 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -39,10 +39,26 @@
#define XE_BO_FLAG_NEEDS_64K BIT(15)
#define XE_BO_FLAG_NEEDS_2M BIT(16)
#define XE_BO_FLAG_GGTT_INVALIDATE BIT(17)
+#define XE_BO_FLAG_PINNED_NORESTORE BIT(18)
+#define XE_BO_FLAG_PINNED_LATE_RESTORE BIT(19)
+#define XE_BO_FLAG_GGTT0 BIT(20)
+#define XE_BO_FLAG_GGTT1 BIT(21)
+#define XE_BO_FLAG_GGTT2 BIT(22)
+#define XE_BO_FLAG_GGTT3 BIT(23)
+#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
+
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
#define XE_BO_FLAG_INTERNAL_64K BIT(31)
+#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \
+ XE_BO_FLAG_GGTT1 | \
+ XE_BO_FLAG_GGTT2 | \
+ XE_BO_FLAG_GGTT3)
+
+#define XE_BO_FLAG_GGTTx(tile) \
+ (XE_BO_FLAG_GGTT0 << (tile)->id)
+
#define XE_PTE_SHIFT 12
#define XE_PAGE_SIZE (1 << XE_PTE_SHIFT)
#define XE_PTE_MASK (XE_PAGE_SIZE - 1)
@@ -63,6 +79,8 @@
#define XE_BO_PROPS_INVALID (-1)
+#define XE_PCI_BARRIER_MMAP_OFFSET (0x50 << XE_PTE_SHIFT)
+
struct sg_table;
struct xe_bo *xe_bo_alloc(void);
@@ -77,7 +95,7 @@ struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags);
+ enum ttm_bo_type type, u32 flags, u64 alignment);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
enum ttm_bo_type type, u32 flags);
@@ -94,6 +112,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size, u64 offset,
enum ttm_bo_type type, u32 flags);
+struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment);
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size,
enum ttm_bo_type type, u32 flags);
@@ -128,6 +152,28 @@ static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
void xe_bo_put(struct xe_bo *bo);
+/*
+ * xe_bo_get_unless_zero() - Conditionally obtain a GEM object refcount on an
+ * xe bo
+ * @bo: The bo for which we want to obtain a refcount.
+ *
+ * There is a short window between where the bo's GEM object refcount reaches
+ * zero and where we put the final ttm_bo reference. Code in the eviction- and
+ * shrinking path should therefore attempt to grab a gem object reference before
+ * trying to use members outside of the base class ttm object. This function is
+ * intended for that purpose. On successful return, this function must be paired
+ * with an xe_bo_put().
+ *
+ * Return: @bo on success, NULL on failure.
+ */
+static inline __must_check struct xe_bo *xe_bo_get_unless_zero(struct xe_bo *bo)
+{
+ if (!bo || !kref_get_unless_zero(&bo->ttm.base.refcount))
+ return NULL;
+
+ return bo;
+}
+
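A hedged sketch of the pattern the comment describes, with the shrink/eviction iteration and locking omitted:

struct xe_bo *ref = xe_bo_get_unless_zero(bo);

if (ref) {
	/* xe_bo members outside the ttm base object are safe to use here */
	xe_bo_put(ref);
}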
static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
if (bo)
@@ -166,6 +212,11 @@ static inline bool xe_bo_is_pinned(struct xe_bo *bo)
return bo->ttm.pin_count;
}
+static inline bool xe_bo_is_protected(const struct xe_bo *bo)
+{
+ return bo->pxp_key_instance;
+}
+
static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
if (likely(bo)) {
@@ -188,34 +239,50 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
}
static inline u32
-xe_bo_ggtt_addr(struct xe_bo *bo)
+__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id];
+
+ if (XE_WARN_ON(!ggtt_node))
return 0;
- XE_WARN_ON(bo->ggtt_node->base.size > bo->size);
- XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32));
- return bo->ggtt_node->base.start;
+ XE_WARN_ON(ggtt_node->base.size > bo->size);
+ XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32));
+ return ggtt_node->base.start;
+}
+
+static inline u32
+xe_bo_ggtt_addr(struct xe_bo *bo)
+{
+ xe_assert(xe_bo_device(bo), bo->tile);
+
+ return __xe_bo_ggtt_addr(bo, bo->tile->id);
}
int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);
+int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);
bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
+bool xe_bo_is_vm_bound(struct xe_bo *bo);
bool xe_bo_has_single_placement(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo, bool force_alloc);
+int xe_bo_evict(struct xe_bo *bo);
int xe_bo_evict_pinned(struct xe_bo *bo);
+int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
+int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);
+int xe_bo_dma_unmap_pinned(struct xe_bo *bo);
+
extern const struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];
@@ -286,6 +353,25 @@ xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
void xe_bo_put_commit(struct llist_head *deferred);
+/**
+ * xe_bo_put_async() - Put BO async
+ * @bo: The bo to put.
+ *
+ * Put a BO asynchronously; the final put is deferred to a worker so this can be called from IRQ context.
+ */
+static inline void
+xe_bo_put_async(struct xe_bo *bo)
+{
+ struct xe_bo_dev *bo_device = &xe_bo_device(bo)->bo_device;
+
+ if (xe_bo_put_deferred(bo, &bo_device->async_list))
+ schedule_work(&bo_device->async_free);
+}
+
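A sketch of a possible IRQ-context caller; the fence callback and its container struct are hypothetical:

struct demo_cb {
	struct dma_fence_cb cb;
	struct xe_bo *bo;
};

static void demo_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct demo_cb *dcb = container_of(cb, struct demo_cb, cb);

	/* Safe in IRQ context: the final free runs in the worker. */
	xe_bo_put_async(dcb->bo);
}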
+void xe_bo_dev_init(struct xe_bo_dev *bo_device);
+
+void xe_bo_dev_fini(struct xe_bo_dev *bo_device);
+
struct sg_table *xe_bo_sg(struct xe_bo *bo);
/*
@@ -312,9 +398,20 @@ static inline unsigned int xe_sg_segment_size(struct device *dev)
return round_down(max / 2, PAGE_SIZE);
}
-#define i915_gem_object_flush_if_display(obj) ((void)(obj))
+/**
+ * struct xe_bo_shrink_flags - flags governing the shrink behaviour.
+ * @purge: Only purging allowed. Don't shrink if bo not purgeable.
+ * @writeback: Attempt to immediately move content to swap.
+ */
+struct xe_bo_shrink_flags {
+ u32 purge : 1;
+ u32 writeback : 1;
+};
+
+long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long *scanned);
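A hedged fragment of how a shrinker scan might call this, assuming a prepared ttm_operation_ctx ctx and a candidate bo:

struct xe_bo_shrink_flags flags = { .purge = 0, .writeback = 1 };
unsigned long scanned = 0;
long freed;

/* Try to move the bo's backing pages towards swap, updating scanned. */
freed = xe_bo_shrink(&ctx, &bo->ttm, flags, &scanned);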
-#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
* xe_bo_is_mem_type - Whether the bo currently resides in the given
* TTM memory type
@@ -329,4 +426,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
return bo->ttm.resource->mem_type == mem_type;
}
#endif
-#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h
index f57d440cc95a..25a884c64bf1 100644
--- a/drivers/gpu/drm/xe/xe_bo_doc.h
+++ b/drivers/gpu/drm/xe/xe_bo_doc.h
@@ -41,7 +41,7 @@
* created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
* access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
* user BOs are evictable and user BOs are never pinned by XE. The allocation of
- * the backing store can be defered from creation time until first use which is
+ * the backing store can be deferred from creation time until first use which is
* either mmap, bind, or pagefault.
*
* Private BOs
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 541b49007d73..ed3746d32b27 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -10,38 +10,121 @@
#include "xe_ggtt.h"
#include "xe_tile.h"
+typedef int (*xe_pinned_fn)(struct xe_bo *bo);
+
+static int xe_bo_apply_to_pinned(struct xe_device *xe,
+ struct list_head *pinned_list,
+ struct list_head *new_list,
+ const xe_pinned_fn pinned_fn)
+{
+ LIST_HEAD(still_in_list);
+ struct xe_bo *bo;
+ int ret = 0;
+
+ spin_lock(&xe->pinned.lock);
+ while (!ret) {
+ bo = list_first_entry_or_null(pinned_list, typeof(*bo),
+ pinned_link);
+ if (!bo)
+ break;
+ xe_bo_get(bo);
+ list_move_tail(&bo->pinned_link, &still_in_list);
+ spin_unlock(&xe->pinned.lock);
+
+ ret = pinned_fn(bo);
+ if (ret && pinned_list != new_list) {
+ spin_lock(&xe->pinned.lock);
+ /*
+ * We might no longer be pinned, since PM notifier can
+ * call this. If the pinned link is now empty, keep it
+ * that way.
+ */
+ if (!list_empty(&bo->pinned_link))
+ list_move(&bo->pinned_link, pinned_list);
+ spin_unlock(&xe->pinned.lock);
+ }
+ xe_bo_put(bo);
+ spin_lock(&xe->pinned.lock);
+ }
+ list_splice_tail(&still_in_list, new_list);
+ spin_unlock(&xe->pinned.lock);
+
+ return ret;
+}
+
/**
- * xe_bo_evict_all - evict all BOs from VRAM
+ * xe_bo_notifier_prepare_all_pinned() - Pre-allocate the backing pages for all
+ * pinned VRAM objects which need to be saved.
+ * @xe: xe device
+ *
+ * Should be called from PM notifier when preparing for s3/s4.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
+{
+ int ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_prepare_pinned);
+
+ return ret;
+}
+
+/**
+ * xe_bo_notifier_unprepare_all_pinned() - Remove the backing pages for all
+ * pinned VRAM objects which have been restored.
+ * @xe: xe device
*
+ * Should be called from PM notifier after exiting s3/s4 (either on success or
+ * failure).
+ */
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
+{
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_notifier_unprepare_pinned);
+}
+
+/**
+ * xe_bo_evict_all_user - evict all non-pinned user BOs from VRAM
* @xe: xe device
*
- * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
- * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
- * All eviction magic done via TTM calls.
+ * Evict non-pinned user BOs (via GPU).
*
* Evict == move VRAM BOs to temporary (typically system) memory.
- *
- * This function should be called before the device goes into a suspend state
- * where the VRAM loses power.
*/
-int xe_bo_evict_all(struct xe_device *xe)
+int xe_bo_evict_all_user(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
u32 mem_type;
- u8 id;
int ret;
- if (!IS_DGFX(xe))
- return 0;
-
/* User memory */
- for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
+ for (mem_type = XE_PL_TT; mem_type <= XE_PL_VRAM1; ++mem_type) {
struct ttm_resource_manager *man =
ttm_manager_type(bdev, mem_type);
+ /*
+ * On igpu platforms with flat CCS we need to ensure we save and restore any CCS
+ * state since this state lives inside graphics stolen memory which doesn't survive
+ * hibernation.
+ *
+ * This can be further improved by only evicting objects that we know have actually
+ * used a compression enabled PAT index.
+ */
+ if (mem_type == XE_PL_TT && (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe)))
+ continue;
+
if (man) {
ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
@@ -49,34 +132,38 @@ int xe_bo_evict_all(struct xe_device *xe)
}
}
- /* Pinned user memory in VRAM */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &still_in_list);
- spin_unlock(&xe->pinned.lock);
+ return 0;
+}
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
- }
+/**
+ * xe_bo_evict_all - evict all BOs from VRAM
+ * @xe: xe device
+ *
+ * Evict non-pinned user BOs first (via GPU), evict pinned external BOs next
+ * (via GPU), wait for evictions, and finally evict pinned kernel BOs via CPU.
+ * All eviction magic done via TTM calls.
+ *
+ * Evict == move VRAM BOs to temporary (typically system) memory.
+ *
+ * This function should be called before the device goes into a suspend state
+ * where the VRAM loses power.
+ */
+int xe_bo_evict_all(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u8 id;
+ int ret;
- spin_lock(&xe->pinned.lock);
- }
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
+ ret = xe_bo_evict_all_user(xe);
+ if (ret)
+ return ret;
+
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
+
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.evicted, xe_bo_evict_pinned);
/*
* Wait for all user BO to be evicted as those evictions depend on the
@@ -85,32 +172,49 @@ int xe_bo_evict_all(struct xe_device *xe)
for_each_tile(tile, xe, id)
xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.evicted);
- spin_unlock(&xe->pinned.lock);
+ if (ret)
+ return ret;
- xe_bo_lock(bo, false);
- ret = xe_bo_evict_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret)
- return ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.evicted,
+ xe_bo_evict_pinned);
+}
- spin_lock(&xe->pinned.lock);
+static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ int ret;
+
+ ret = xe_bo_restore_pinned(bo);
+ if (ret)
+ return ret;
+
+ if (bo->flags & XE_BO_FLAG_GGTT) {
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, xe_bo_device(bo), id) {
+ if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
+ continue;
+
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo(tile->mem.ggtt, bo);
+ mutex_unlock(&tile->mem.ggtt->lock);
+ }
}
- spin_unlock(&xe->pinned.lock);
+
+ /*
+ * We expect validate to trigger a move VRAM and our move code
+ * should setup the iosys map.
+ */
+ xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
+ !iosys_map_is_null(&bo->vmap));
return 0;
}
/**
- * xe_bo_restore_kernel - restore kernel BOs to VRAM
+ * xe_bo_restore_early - restore early phase kernel BOs to VRAM
*
* @xe: xe device
*
@@ -120,109 +224,130 @@ int xe_bo_evict_all(struct xe_device *xe)
* This function should be called early, before trying to init the GT, on device
* resume.
*/
-int xe_bo_restore_kernel(struct xe_device *xe)
+int xe_bo_restore_early(struct xe_device *xe)
{
- struct xe_bo *bo;
- int ret;
+ return xe_bo_apply_to_pinned(xe, &xe->pinned.early.evicted,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
+}
- if (!IS_DGFX(xe))
- return 0;
+/**
+ * xe_bo_restore_late - restore pinned late phase BOs
+ *
+ * @xe: xe device
+ *
+ * Move pinned user and kernel BOs which can use blitter from temporary
+ * (typically system) memory to VRAM. All moves done via TTM calls.
+ *
+ * This function should be called late, after GT init, on device resume.
+ */
+int xe_bo_restore_late(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ int ret, id;
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.evicted,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- xe_bo_get(bo);
- list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present);
- spin_unlock(&xe->pinned.lock);
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.evicted,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_restore_and_map_ggtt);
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- if (ret) {
- xe_bo_put(bo);
- return ret;
- }
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- if (bo->flags & XE_BO_FLAG_GGTT) {
- struct xe_tile *tile = bo->tile;
+ if (ret)
+ return ret;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
- }
+ if (!IS_DGFX(xe))
+ return 0;
- /*
- * We expect validate to trigger a move VRAM and our move code
- * should setup the iosys map.
- */
- xe_assert(xe, !iosys_map_is_null(&bo->vmap));
- xe_assert(xe, xe_bo_is_vram(bo));
+ /* Pinned user memory in VRAM should be validated on resume */
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_restore_pinned);
- xe_bo_put(bo);
+ /* Wait for restore to complete */
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
- spin_lock(&xe->pinned.lock);
- }
- spin_unlock(&xe->pinned.lock);
+ return ret;
+}
- return 0;
+static void xe_bo_pci_dev_remove_pinned(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_dma_unmap_pinned);
+ for_each_tile(tile, xe, id)
+ xe_tile_migrate_wait(tile);
}
/**
- * xe_bo_restore_user - restore pinned user BOs to VRAM
- *
- * @xe: xe device
+ * xe_bo_pci_dev_remove_all() - Handle bos when the pci_device is about to be removed
+ * @xe: The xe device.
*
- * Move pinned user BOs from temporary (typically system) memory to VRAM via
- * CPU. All moves done via TTM calls.
+ * On pci_device removal we need to drop all dma mappings and move
+ * the data of exported bos out to system. This includes SVM bos and
+ * exported dma-buf bos. This is done by evicting all bos, but
+ * the evict placement in xe_evict_flags() is chosen such that all
+ * bos except those mentioned are purged, and thus their memory
+ * is released.
*
- * This function should be called late, after GT init, on device resume.
+ * For pinned bos, we're unmapping dma.
*/
-int xe_bo_restore_user(struct xe_device *xe)
+void xe_bo_pci_dev_remove_all(struct xe_device *xe)
{
- struct xe_bo *bo;
- struct xe_tile *tile;
- struct list_head still_in_list;
- u8 id;
- int ret;
+ unsigned int mem_type;
- if (!IS_DGFX(xe))
- return 0;
+ /*
+ * Move pagemap bos and exported dma-buf to system, and
+ * purge everything else.
+ */
+ for (mem_type = XE_PL_VRAM1; mem_type >= XE_PL_TT; --mem_type) {
+ struct ttm_resource_manager *man =
+ ttm_manager_type(&xe->ttm, mem_type);
- /* Pinned user memory in VRAM should be validated on resume */
- INIT_LIST_HEAD(&still_in_list);
- spin_lock(&xe->pinned.lock);
- for (;;) {
- bo = list_first_entry_or_null(&xe->pinned.external_vram,
- typeof(*bo), pinned_link);
- if (!bo)
- break;
- list_move_tail(&bo->pinned_link, &still_in_list);
- xe_bo_get(bo);
- spin_unlock(&xe->pinned.lock);
+ if (man) {
+ int ret = ttm_resource_manager_evict_all(&xe->ttm, man);
- xe_bo_lock(bo, false);
- ret = xe_bo_restore_pinned(bo);
- xe_bo_unlock(bo);
- xe_bo_put(bo);
- if (ret) {
- spin_lock(&xe->pinned.lock);
- list_splice_tail(&still_in_list,
- &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- return ret;
+ drm_WARN_ON(&xe->drm, ret);
}
-
- spin_lock(&xe->pinned.lock);
}
- list_splice_tail(&still_in_list, &xe->pinned.external_vram);
- spin_unlock(&xe->pinned.lock);
- /* Wait for restore to complete */
- for_each_tile(tile, xe, id)
- xe_tile_migrate_wait(tile);
+ xe_bo_pci_dev_remove_pinned(xe);
+}
- return 0;
+static void xe_bo_pinned_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
+ &xe->pinned.late.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.early.kernel_bo_present,
+ &xe->pinned.early.kernel_bo_present,
+ xe_bo_dma_unmap_pinned);
+}
+
+/**
+ * xe_bo_pinned_init() - Initialize pinned bo tracking
+ * @xe: The xe device.
+ *
+ * Initializes the lists and locks required for pinned bo
+ * tracking and registers a callback to dma-unmap
+ * any remaining pinned bos on pci device removal.
+ *
+ * Return: %0 on success, negative error code on error.
+ */
+int xe_bo_pinned_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->pinned.lock);
+ INIT_LIST_HEAD(&xe->pinned.early.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.early.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.kernel_bo_present);
+ INIT_LIST_HEAD(&xe->pinned.late.evicted);
+ INIT_LIST_HEAD(&xe->pinned.late.external);
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_bo_pinned_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.h b/drivers/gpu/drm/xe/xe_bo_evict.h
index 746894798852..e8385cb7f5e9 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.h
+++ b/drivers/gpu/drm/xe/xe_bo_evict.h
@@ -9,7 +9,13 @@
struct xe_device;
int xe_bo_evict_all(struct xe_device *xe);
-int xe_bo_restore_kernel(struct xe_device *xe);
-int xe_bo_restore_user(struct xe_device *xe);
+int xe_bo_evict_all_user(struct xe_device *xe);
+int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe);
+void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe);
+int xe_bo_restore_early(struct xe_device *xe);
+int xe_bo_restore_late(struct xe_device *xe);
+void xe_bo_pci_dev_remove_all(struct xe_device *xe);
+
+int xe_bo_pinned_init(struct xe_device *xe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index 2ed558ac2264..eb5e83c5f233 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -8,11 +8,12 @@
#include <linux/iosys-map.h>
+#include <drm/drm_gpusvm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
+#include "xe_device_types.h"
#include "xe_ggtt_types.h"
struct xe_device;
@@ -27,6 +28,10 @@ struct xe_vm;
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
+ /** @backup_obj: The backup object when pinned and suspended (vram only) */
+ struct xe_bo *backup_obj;
+	/** @parent_obj: Ref to parent bo if this is a backup_obj */
+ struct xe_bo *parent_obj;
/** @size: Size of this buffer object */
size_t size;
/** @flags: flags for this buffer object */
@@ -39,8 +44,8 @@ struct xe_bo {
struct ttm_place placements[XE_BO_MAX_PLACEMENTS];
/** @placement: current placement for this BO */
struct ttm_placement placement;
- /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */
- struct xe_ggtt_node *ggtt_node;
+ /** @ggtt_node: Array of GGTT nodes if this BO is mapped in the GGTTs */
+ struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
/** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
@@ -57,6 +62,12 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
+ /**
+ * @pxp_key_instance: PXP key instance this BO was created against. A
+ * 0 in this variable indicates that the BO does not use PXP encryption.
+ */
+ u32 pxp_key_instance;
+
/** @freed: List node for delayed put. */
struct llist_node freed;
/** @update_index: Update index if PT BO */
@@ -74,11 +85,16 @@ struct xe_bo {
*/
u16 cpu_caching;
+ /** @devmem_allocation: SVM device memory allocation */
+ struct drm_gpusvm_devmem devmem_allocation;
+
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
struct list_head vram_userfault_link;
-};
-#define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
-#define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)
+ /** @min_align: minimum alignment needed for this BO if different
+ * from default
+ */
+ u64 min_align;
+};
#endif
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
new file mode 100644
index 000000000000..cb9f175c89a1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/configfs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "xe_configfs.h"
+#include "xe_module.h"
+
+/**
+ * DOC: Xe Configfs
+ *
+ * Overview
+ * =========
+ *
+ * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
+ * configfs subsystem called ``'xe'`` that creates a directory in the mounted configfs directory.
+ * The user can create devices under this directory and configure them as necessary.
+ * See Documentation/filesystems/configfs.rst for more information about how configfs works.
+ *
+ * Create devices
+ * ===============
+ *
+ * In order to create a device, the user has to create a directory inside ``'xe'``::
+ *
+ * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ *
+ * Every device created is populated by the driver with entries that can be
+ * used to configure it::
+ *
+ * /sys/kernel/config/xe/
+ * .. 0000:03:00.0/
+ * ... survivability_mode
+ *
+ * Configure Attributes
+ * ====================
+ *
+ * Survivability mode:
+ * -------------------
+ *
+ * Enable survivability mode on supported cards. This setting only takes
+ * effect when probing the device. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * Remove devices
+ * ==============
+ *
+ * The created device directories can be removed using ``rmdir``::
+ *
+ * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ */
+
+struct xe_config_device {
+ struct config_group group;
+
+ bool survivability_mode;
+
+ /* protects attributes */
+ struct mutex lock;
+};
+
+static struct xe_config_device *to_xe_config_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_device, group);
+}
+
+static ssize_t survivability_mode_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->survivability_mode);
+}
+
+static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ bool survivability_mode;
+ int ret;
+
+ ret = kstrtobool(page, &survivability_mode);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = survivability_mode;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
+CONFIGFS_ATTR(, survivability_mode);
+
+static struct configfs_attribute *xe_config_device_attrs[] = {
+ &attr_survivability_mode,
+ NULL,
+};
+
+static void xe_config_device_release(struct config_item *item)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ mutex_destroy(&dev->lock);
+ kfree(dev);
+}
+
+static struct configfs_item_operations xe_config_device_ops = {
+ .release = xe_config_device_release,
+};
+
+static const struct config_item_type xe_config_device_type = {
+ .ct_item_ops = &xe_config_device_ops,
+ .ct_attrs = xe_config_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *xe_config_make_device_group(struct config_group *group,
+ const char *name)
+{
+ unsigned int domain, bus, slot, function;
+ struct xe_config_device *dev;
+ struct pci_dev *pdev;
+ int ret;
+
+ ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ if (ret != 4)
+ return ERR_PTR(-EINVAL);
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev)
+ return ERR_PTR(-EINVAL);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ config_group_init_type_name(&dev->group, name, &xe_config_device_type);
+
+ mutex_init(&dev->lock);
+
+ return &dev->group;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .make_group = xe_config_make_device_group,
+};
+
+static const struct config_item_type xe_configfs_type = {
+ .ct_group_ops = &xe_config_device_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem xe_configfs = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "xe",
+ .ci_type = &xe_configfs_type,
+ },
+ },
+};
+
+static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+{
+ struct config_item *item;
+ char name[64];
+
+ snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
+ pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+
+ mutex_lock(&xe_configfs.su_mutex);
+ item = config_group_find_item(&xe_configfs.su_group, name);
+ mutex_unlock(&xe_configfs.su_mutex);
+
+ if (!item)
+ return NULL;
+
+ return to_xe_config_device(item);
+}
+
+/**
+ * xe_configfs_get_survivability_mode - get configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * find the configfs group that belongs to the pci device and return
+ * the survivability mode attribute
+ *
+ * Return: survivability mode if config group is found, false otherwise
+ */
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ bool mode;
+
+ if (!dev)
+ return false;
+
+ mode = dev->survivability_mode;
+ config_item_put(&dev->group.cg_item);
+
+ return mode;
+}
+
+/**
+ * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * @pdev: pci device
+ *
+ * find the configfs group that belongs to the pci device and clear survivability
+ * mode attribute
+ */
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->lock);
+ dev->survivability_mode = 0;
+ mutex_unlock(&dev->lock);
+
+ config_item_put(&dev->group.cg_item);
+}
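One possible consumer pattern for the two helpers above; the call site is outside this patch and shown only as a sketch:

bool survivability = xe_configfs_get_survivability_mode(pdev);

if (survivability)
	/* consume the attribute once so a later re-probe starts clean */
	xe_configfs_clear_survivability_mode(pdev);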
+
+int __init xe_configfs_init(void)
+{
+ struct config_group *root = &xe_configfs.su_group;
+ int ret;
+
+ config_group_init(root);
+ mutex_init(&xe_configfs.su_mutex);
+ ret = configfs_register_subsystem(&xe_configfs);
+ if (ret) {
+ pr_err("Error %d while registering %s subsystem\n",
+ ret, root->cg_item.ci_namebuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+void __exit xe_configfs_exit(void)
+{
+ configfs_unregister_subsystem(&xe_configfs);
+}
+
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
new file mode 100644
index 000000000000..d7d041ec2611
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef _XE_CONFIGFS_H_
+#define _XE_CONFIGFS_H_
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+#if IS_ENABLED(CONFIG_CONFIGFS_FS)
+int xe_configfs_init(void);
+void xe_configfs_exit(void);
+bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
+void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+#else
+static inline int xe_configfs_init(void) { return 0; };
+static inline void xe_configfs_exit(void) {};
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; };
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) {};
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index fe4319eb13fd..d0503959a8ed 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -18,6 +18,7 @@
#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
#include "xe_pm.h"
+#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
#include "xe_step.h"
@@ -90,13 +91,32 @@ static int forcewake_open(struct inode *inode, struct file *file)
{
struct xe_device *xe = inode->i_private;
struct xe_gt *gt;
- u8 id;
+ u8 id, last_gt;
+ unsigned int fw_ref;
xe_pm_runtime_get(xe);
- for_each_gt(gt, xe, id)
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ for_each_gt(gt, xe, id) {
+ last_gt = id;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ goto err_fw_get;
+ }
return 0;
+
+err_fw_get:
+ for_each_gt(gt, xe, id) {
+ if (id < last_gt)
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ else if (id == last_gt)
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ else
+ break;
+ }
+
+ xe_pm_runtime_put(xe);
+ return -ETIMEDOUT;
}
static int forcewake_release(struct inode *inode, struct file *file)
@@ -106,7 +126,7 @@ static int forcewake_release(struct inode *inode, struct file *file)
u8 id;
for_each_gt(gt, xe, id)
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_pm_runtime_put(xe);
return 0;
@@ -147,7 +167,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
return -EINVAL;
if (xe->wedged.mode == wedged_mode)
- return 0;
+ return size;
xe->wedged.mode = wedged_mode;
@@ -156,6 +176,7 @@ static ssize_t wedged_mode_set(struct file *f, const char __user *ubuf,
ret = xe_guc_ads_scheduler_policy_toggle_reset(&gt->uc.guc.ads);
if (ret) {
xe_gt_err(gt, "Failed to update GuC ADS scheduler policy. GuC may still cause engine reset even with wedged_mode=2\n");
+ xe_pm_runtime_put(xe);
return -EIO;
}
}
@@ -211,5 +232,7 @@ void xe_debugfs_register(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_debugfs_register(gt);
+ xe_pxp_debugfs_register(xe->pxp);
+
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
}
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index bdb76e834e4c..7a8af2311318 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -6,6 +6,7 @@
#include "xe_devcoredump.h"
#include "xe_devcoredump_types.h"
+#include <linux/ascii85.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>
@@ -16,39 +17,52 @@
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
+#include "xe_guc_log.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
+#include "xe_module.h"
+#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_vm.h"
/**
* DOC: Xe device coredump
*
- * Devices overview:
* Xe uses dev_coredump infrastructure for exposing the crash errors in a
- * standardized way.
- * devcoredump exposes a temporary device under /sys/class/devcoredump/
- * which is linked with our card device directly.
- * The core dump can be accessed either from
- * /sys/class/drm/card<n>/device/devcoredump/ or from
- * /sys/class/devcoredump/devcd<m> where
- * /sys/class/devcoredump/devcd<m>/failing_device is a link to
- * /sys/class/drm/card<n>/device/.
+ * standardized way. Once a crash occurs, devcoredump exposes a temporary
+ * node under ``/sys/class/devcoredump/devcd<m>/``. The same node is also
+ * accessible in ``/sys/class/drm/card<n>/device/devcoredump/``. The
+ * ``failing_device`` symlink points to the device that crashed and created the
+ * coredump.
*
- * Snapshot at hang:
- * The 'data' file is printed with a drm_printer pointer at devcoredump read
- * time. For this reason, we need to take snapshots from when the hang has
- * happened, and not only when the user is reading the file. Otherwise the
- * information is outdated since the resets might have happened in between.
+ * The following characteristics are observed by xe when creating a device
+ * coredump:
*
- * 'First' failure snapshot:
- * In general, the first hang is the most critical one since the following hangs
- * can be a consequence of the initial hang. For this reason we only take the
- * snapshot of the 'first' failure and ignore subsequent calls of this function,
- * at least while the coredump device is alive. Dev_coredump has a delayed work
- * queue that will eventually delete the device and free all the dump
- * information.
+ * **Snapshot at hang**:
+ * The 'data' file contains a snapshot of the HW and driver states at the time
+ * the hang happened. Due to the driver recovering from resets/crashes, it may
+ * not correspond to the state of the system when the file is read by
+ * userspace.
+ *
+ * **Coredump release**:
+ * After a coredump is generated, it stays in kernel memory until released by
+ * userspace by writing anything to it, or after an internal timer expires. The
+ * exact timeout may vary and should not be relied upon. Example to release
+ * a coredump:
+ *
+ * .. code-block:: shell
+ *
+ * $ > /sys/class/drm/card0/device/devcoredump/data
+ *
+ * **First failure only**:
+ * In general, the first hang is the most critical one since the following
+ * hangs can be a consequence of the initial hang. For this reason a snapshot
+ * is taken only for the first failure. Until the devcoredump is released by
+ * userspace or kernel, all subsequent hangs do not override the snapshot nor
+ * create new ones. Devcoredump has a delayed work queue that will eventually
+ * delete the file node and free all the dump information.
*/
#ifdef CONFIG_DEV_COREDUMP
@@ -66,7 +80,8 @@ static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
return &q->gt->uc.guc;
}
-static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
+ ssize_t start,
struct xe_devcoredump *coredump)
{
struct xe_device *xe;
@@ -80,36 +95,44 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
ss = &coredump->snapshot;
iter.data = buffer;
- iter.start = 0;
+ iter.start = start;
iter.remain = count;
p = drm_coredump_printer(&iter);
- drm_printf(&p, "**** Xe Device Coredump ****\n");
- drm_printf(&p, "kernel: " UTS_RELEASE "\n");
- drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_puts(&p, "**** Xe Device Coredump ****\n");
+ drm_printf(&p, "Reason: %s\n", ss->reason);
+ drm_puts(&p, "kernel: " UTS_RELEASE "\n");
+ drm_puts(&p, "module: " KBUILD_MODNAME "\n");
ts = ktime_to_timespec64(ss->snapshot_time);
drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
ts = ktime_to_timespec64(ss->boot_time);
drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
- drm_printf(&p, "Process: %s\n", ss->process_name);
+ drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
xe_device_snapshot_print(xe, &p);
- drm_printf(&p, "\n**** GuC CT ****\n");
- xe_guc_ct_snapshot_print(coredump->snapshot.ct, &p);
- xe_guc_exec_queue_snapshot_print(coredump->snapshot.ge, &p);
+ drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
+ drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id);
- drm_printf(&p, "\n**** Job ****\n");
- xe_sched_job_snapshot_print(coredump->snapshot.job, &p);
+ drm_puts(&p, "\n**** GuC Log ****\n");
+ xe_guc_log_snapshot_print(ss->guc.log, &p);
+ drm_puts(&p, "\n**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- drm_printf(&p, "\n**** HW Engines ****\n");
+ drm_puts(&p, "\n**** Contexts ****\n");
+ xe_guc_exec_queue_snapshot_print(ss->ge, &p);
+
+ drm_puts(&p, "\n**** Job ****\n");
+ xe_sched_job_snapshot_print(ss->job, &p);
+
+ drm_puts(&p, "\n**** HW Engines ****\n");
for (i = 0; i < XE_NUM_HW_ENGINES; i++)
- if (coredump->snapshot.hwe[i])
- xe_hw_engine_snapshot_print(coredump->snapshot.hwe[i],
- &p);
- drm_printf(&p, "\n**** VM state ****\n");
- xe_vm_snapshot_print(coredump->snapshot.vm, &p);
+ if (ss->hwe[i])
+ xe_engine_snapshot_print(ss->hwe[i], &p);
+
+ drm_puts(&p, "\n**** VM state ****\n");
+ xe_vm_snapshot_print(ss->vm, &p);
return count - iter.remain;
}
@@ -118,8 +141,17 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
{
int i;
- xe_guc_ct_snapshot_free(ss->ct);
- ss->ct = NULL;
+ kfree(ss->reason);
+ ss->reason = NULL;
+
+ xe_guc_log_snapshot_free(ss->guc.log);
+ ss->guc.log = NULL;
+
+ xe_guc_ct_snapshot_free(ss->guc.ct);
+ ss->guc.ct = NULL;
+
+ xe_guc_capture_put_matched_nodes(&ss->gt->uc.guc);
+ ss->matched_node = NULL;
xe_guc_exec_queue_snapshot_free(ss->ge);
ss->ge = NULL;
@@ -137,28 +169,7 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
ss->vm = NULL;
}
-static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
-{
- struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
- struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
-
- /* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL))
- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
- xe_vm_snapshot_capture_delayed(ss->vm);
- xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
- xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
-
- /* Calculate devcoredump size */
- ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
-
- ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
- if (!ss->read.buffer)
- return;
-
- __xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
- xe_devcoredump_snapshot_free(ss);
-}
+#define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G)
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
@@ -166,6 +177,8 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
struct xe_devcoredump *coredump = data;
struct xe_devcoredump_snapshot *ss;
ssize_t byte_copied;
+ u32 chunk_offset;
+ ssize_t new_chunk_position;
if (!coredump)
return -ENODEV;
@@ -175,15 +188,43 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
/* Ensure delayed work is captured before continuing */
flush_work(&ss->work);
- if (!ss->read.buffer)
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_get(gt_to_xe(ss->gt));
+
+ mutex_lock(&coredump->lock);
+
+ if (!ss->read.buffer) {
+ mutex_unlock(&coredump->lock);
return -ENODEV;
+ }
- if (offset >= ss->read.size)
+ if (offset >= ss->read.size) {
+ mutex_unlock(&coredump->lock);
return 0;
+ }
+
+ new_chunk_position = div_u64_rem(offset,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ &chunk_offset);
+
+ if (offset >= ss->read.chunk_position + XE_DEVCOREDUMP_CHUNK_MAX ||
+ offset < ss->read.chunk_position) {
+ ss->read.chunk_position = new_chunk_position *
+ XE_DEVCOREDUMP_CHUNK_MAX;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ ss->read.chunk_position, coredump);
+ }
byte_copied = count < ss->read.size - offset ? count :
ss->read.size - offset;
- memcpy(buffer, ss->read.buffer + offset, byte_copied);
+ memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied);
+
+ mutex_unlock(&coredump->lock);
+
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX)
+ xe_pm_runtime_put(gt_to_xe(ss->gt));
return byte_copied;
}
@@ -198,6 +239,8 @@ static void xe_devcoredump_free(void *data)
cancel_work_sync(&coredump->snapshot.work);
+ mutex_lock(&coredump->lock);
+
xe_devcoredump_snapshot_free(&coredump->snapshot);
kvfree(coredump->snapshot.read.buffer);
@@ -206,28 +249,86 @@ static void xe_devcoredump_free(void *data)
coredump->captured = false;
drm_info(&coredump_to_xe(coredump)->drm,
"Xe device coredump has been deleted.\n");
+
+ mutex_unlock(&coredump->lock);
+}
+
+static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
+{
+ struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
+ struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
+ struct xe_device *xe = coredump_to_xe(coredump);
+ unsigned int fw_ref;
+
+ /*
+ * NB: Despite passing a GFP_ flags parameter here, more allocations are done
+ * internally using GFP_KERNEL explicitly. Hence this call must be in the worker
+ * thread and not in the initial capture call.
+ */
+ dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
+ xe_devcoredump_read, xe_devcoredump_free,
+ XE_COREDUMP_TIMEOUT_JIFFIES);
+
+ xe_pm_runtime_get(xe);
+
+ /* keep going if fw fails as we still want to save the memory and SW data */
+ fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ xe_vm_snapshot_capture_delayed(ss->vm);
+ xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
+ xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
+
+ ss->read.chunk_position = 0;
+
+ /* Calculate devcoredump size */
+ ss->read.size = __xe_devcoredump_read(NULL, LONG_MAX, 0, coredump);
+
+ if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) {
+ ss->read.buffer = kvmalloc(XE_DEVCOREDUMP_CHUNK_MAX,
+ GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer,
+ XE_DEVCOREDUMP_CHUNK_MAX,
+ 0, coredump);
+ } else {
+ ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
+ if (!ss->read.buffer)
+ goto put_pm;
+
+ __xe_devcoredump_read(ss->read.buffer, ss->read.size, 0,
+ coredump);
+ xe_devcoredump_snapshot_free(ss);
+ }
+
+put_pm:
+ xe_pm_runtime_put(xe);
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
+ struct xe_exec_queue *q,
struct xe_sched_job *job)
{
struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
- struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
u32 adj_logical_mask = q->logical_mask;
u32 width_mask = (0x1 << q->width) - 1;
const char *process_name = "no process";
- int i;
+ unsigned int fw_ref;
bool cookie;
+ int i;
ss->snapshot_time = ktime_get_real();
ss->boot_time = ktime_get_boottime();
- if (q->vm && q->vm->xef)
+ if (q->vm && q->vm->xef) {
process_name = q->vm->xef->process_name;
+ ss->pid = q->vm->xef->pid;
+ }
+
strscpy(ss->process_name, process_name);
ss->gt = q->gt;
@@ -244,57 +345,61 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,
}
/* keep going if fw fails as we still want to save the memory and SW data */
- if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
- xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
+ fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
- coredump->snapshot.ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
- coredump->snapshot.ge = xe_guc_exec_queue_snapshot_capture(q);
- coredump->snapshot.job = xe_sched_job_snapshot_capture(job);
- coredump->snapshot.vm = xe_vm_snapshot_capture(q->vm);
+ ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
+ ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
+ ss->ge = xe_guc_exec_queue_snapshot_capture(q);
+ if (job)
+ ss->job = xe_sched_job_snapshot_capture(job);
+ ss->vm = xe_vm_snapshot_capture(q->vm);
- for_each_hw_engine(hwe, q->gt, id) {
- if (hwe->class != q->hwe->class ||
- !(BIT(hwe->logical_instance) & adj_logical_mask)) {
- coredump->snapshot.hwe[id] = NULL;
- continue;
- }
- coredump->snapshot.hwe[id] = xe_hw_engine_snapshot_capture(hwe);
- }
+ xe_engine_snapshot_capture_for_queue(q);
queue_work(system_unbound_wq, &ss->work);
- xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
dma_fence_end_signalling(cookie);
}
/**
* xe_devcoredump - Take the required snapshots and initialize coredump device.
+ * @q: The faulty xe_exec_queue, where the issue was detected.
* @job: The faulty xe_sched_job, where the issue was detected.
+ * @fmt: Printf format + args to describe the reason for the core dump
*
* This function should be called at the crash time within the serialized
* gt_reset. It is skipped if we still have the core dump device available
* with the information of the 'first' snapshot.
*/
-void xe_devcoredump(struct xe_sched_job *job)
+__printf(3, 4)
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...)
{
- struct xe_device *xe = gt_to_xe(job->q->gt);
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_devcoredump *coredump = &xe->devcoredump;
+ va_list varg;
+
+ mutex_lock(&coredump->lock);
if (coredump->captured) {
drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
+ mutex_unlock(&coredump->lock);
return;
}
coredump->captured = true;
- devcoredump_snapshot(coredump, job);
+
+ va_start(varg, fmt);
+ coredump->snapshot.reason = kvasprintf(GFP_ATOMIC, fmt, varg);
+ va_end(varg);
+
+ devcoredump_snapshot(coredump, q, job);
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
xe->drm.primary->index);
- dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
- xe_devcoredump_read, xe_devcoredump_free,
- XE_COREDUMP_TIMEOUT_JIFFIES);
+ mutex_unlock(&coredump->lock);
}
static void xe_driver_devcoredump_fini(void *arg)
@@ -306,7 +411,106 @@ static void xe_driver_devcoredump_fini(void *arg)
int xe_devcoredump_init(struct xe_device *xe)
{
+ int err;
+
+ err = drmm_mutex_init(&xe->drm, &xe->devcoredump.lock);
+ if (err)
+ return err;
+
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&xe->devcoredump.lock);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}
#endif
+
+/**
+ * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
+ *
+ * The output is split into multiple calls to drm_puts() because some print
+ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
+ * add newlines, as is the case with dmesg: each drm_puts() call creates a
+ * separate line.
+ *
+ * There is also a scheduler yield call to prevent the 'task has been stuck for
+ * 120s' kernel hang check feature from firing when printing to a slow target
+ * such as dmesg over a serial port.
+ *
+ * @p: the printer object to output to
+ * @prefix: optional prefix to add to output string
+ * @suffix: optional suffix to add at the end. 0 disables it and is
+ * not added to the output, which is useful when using multiple calls
+ * to dump data to @p
+ * @blob: the Binary Large OBject to dump out
+ * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
+ * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
+ */
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ const void *blob, size_t offset, size_t size)
+{
+ const u32 *blob32 = (const u32 *)blob;
+ char buff[ASCII85_BUFSZ], *line_buff;
+ size_t line_pos = 0;
+
+#define DMESG_MAX_LINE_LEN 800
+ /* Always leave space for the suffix char and the \0 */
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "<suffix>\0" */
+
+ if (size & 3)
+ drm_printf(p, "Size not word aligned: %zu", size);
+ if (offset & 3)
+ drm_printf(p, "Offset not word aligned: %zu", offset);
+
+ line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_ATOMIC);
+ if (!line_buff) {
+ drm_printf(p, "Failed to allocate line buffer\n");
+ return;
+ }
+
+ blob32 += offset / sizeof(*blob32);
+ size /= sizeof(*blob32);
+
+ if (prefix) {
+ strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2);
+ line_pos = strlen(line_buff);
+
+ line_buff[line_pos++] = ':';
+ line_buff[line_pos++] = ' ';
+ }
+
+ while (size--) {
+ u32 val = *(blob32++);
+
+ strscpy(line_buff + line_pos, ascii85_encode(val, buff),
+ DMESG_MAX_LINE_LEN - line_pos);
+ line_pos += strlen(line_buff + line_pos);
+
+ if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
+ line_buff[line_pos++] = 0;
+
+ drm_puts(p, line_buff);
+
+ line_pos = 0;
+
+ /* Prevent 'stuck thread' time out errors */
+ cond_resched();
+ }
+ }
+
+ if (suffix)
+ line_buff[line_pos++] = suffix;
+
+ if (line_pos) {
+ line_buff[line_pos++] = 0;
+ drm_puts(p, line_buff);
+ }
+
+ kfree(line_buff);
+
+#undef MIN_SPACE
+#undef DMESG_MAX_LINE_LEN
+}
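A minimal caller sketch for the helper above (the function name, buffer and length are hypothetical and not part of this patch). It assumes the buffer length is a multiple of sizeof(u32), as the kernel-doc requires, and uses a zero suffix on the first call so that two calls can contribute to one logical dump section before the final newline:

    /* Hypothetical caller, for illustration only */
    static void dump_fw_state(struct drm_printer *p, const u32 *data, size_t len)
    {
    	/* len is in bytes and assumed to be a multiple of sizeof(u32) */
    	size_t half = round_down(len / 2, sizeof(u32));

    	/* suffix == 0 keeps the section open for the next call */
    	xe_print_blob_ascii85(p, "FW state", 0, data, 0, half);
    	/* continue at the aligned offset and terminate with '\n' */
    	xe_print_blob_ascii85(p, "FW state", '\n', data, half, len - half);
    }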
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index e2fa65ce0932..5391a80a4d1b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -6,14 +6,20 @@
#ifndef _XE_DEVCOREDUMP_H_
#define _XE_DEVCOREDUMP_H_
+#include <linux/types.h>
+
+struct drm_printer;
struct xe_device;
+struct xe_exec_queue;
struct xe_sched_job;
#ifdef CONFIG_DEV_COREDUMP
-void xe_devcoredump(struct xe_sched_job *job);
+void xe_devcoredump(struct xe_exec_queue *q, struct xe_sched_job *job, const char *fmt, ...);
int xe_devcoredump_init(struct xe_device *xe);
#else
-static inline void xe_devcoredump(struct xe_sched_job *job)
+static inline void xe_devcoredump(struct xe_exec_queue *q,
+ struct xe_sched_job *job,
+ const char *fmt, ...)
{
}
@@ -23,4 +29,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
+ const void *blob, size_t offset, size_t size);
+
#endif
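As a usage illustration of the widened entry point declared above, a timeout or reset path can now pass the exec queue even when no job is available and describe the trigger through the format string. The call sites below are a sketch; the message text and the local variables are assumptions, not taken from this patch:

    /* Sketch of a caller in a timeout/reset path (names assumed) */
    if (job)
    	xe_devcoredump(q, job, "Timedout job on guc_id=%d", guc_id);
    else
    	xe_devcoredump(q, NULL, "%s", "GuC CT communication failure");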
diff --git a/drivers/gpu/drm/xe/xe_devcoredump_types.h b/drivers/gpu/drm/xe/xe_devcoredump_types.h
index 440d05d77a5a..a174385a6d83 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump_types.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump_types.h
@@ -28,22 +28,37 @@ struct xe_devcoredump_snapshot {
ktime_t boot_time;
/** @process_name: Name of process that triggered this gpu hang */
char process_name[TASK_COMM_LEN];
+ /** @pid: Process id of process that triggered this gpu hang */
+ pid_t pid;
+ /** @reason: The reason the coredump was triggered */
+ char *reason;
/** @gt: Affected GT, used by forcewake for delayed capture */
struct xe_gt *gt;
/** @work: Workqueue for deferred capture outside of signaling context */
struct work_struct work;
- /* GuC snapshots */
- /** @ct: GuC CT snapshot */
- struct xe_guc_ct_snapshot *ct;
- /** @ge: Guc Engine snapshot */
+ /** @guc: GuC snapshots */
+ struct {
+ /** @guc.ct: GuC CT snapshot */
+ struct xe_guc_ct_snapshot *ct;
+ /** @guc.log: GuC log snapshot */
+ struct xe_guc_log_snapshot *log;
+ } guc;
+
+ /** @ge: GuC Submission Engine snapshot */
struct xe_guc_submit_exec_queue_snapshot *ge;
/** @hwe: HW Engine snapshot array */
struct xe_hw_engine_snapshot *hwe[XE_NUM_HW_ENGINES];
/** @job: Snapshot of job state */
struct xe_sched_job_snapshot *job;
+ /**
+ * @matched_node: The matched capture node for a timed-out job.
+ * This single-node tracker works because devcoredump only ever
+ * produces one hw-engine capture per devcoredump event
+ */
+ struct __guc_capture_parsed_output *matched_node;
/** @vm: Snapshot of VM state */
struct xe_vm_snapshot *vm;
@@ -51,6 +66,8 @@ struct xe_devcoredump_snapshot {
struct {
/** @read.size: size of devcoredump in human readable format */
ssize_t size;
+ /** @read.chunk_position: position of devcoredump chunk */
+ ssize_t chunk_position;
/** @read.buffer: buffer of devcoredump in human readable format */
char *buffer;
} read;
@@ -65,7 +82,9 @@ struct xe_devcoredump_snapshot {
* for reading the information.
*/
struct xe_devcoredump {
- /** @captured: The snapshot of the first hang has already been taken. */
+ /** @lock: protects access to entire structure */
+ struct mutex lock;
+ /** @captured: The snapshot of the first hang has already been taken */
bool captured;
/** @snapshot: Snapshot is captured at time of the first crash */
struct xe_devcoredump_snapshot snapshot;
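The new @read.chunk_position field pairs with XE_DEVCOREDUMP_CHUNK_MAX in xe_devcoredump_read(): only a 1.5 GiB window of the dump is kept rendered at a time, and each read offset is split into the window base plus an offset inside the window. A standalone sketch of that arithmetic, with the constant duplicated here purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors XE_DEVCOREDUMP_CHUNK_MAX = SZ_512M + SZ_1G = 1.5 GiB */
    #define CHUNK_MAX ((uint64_t)1536 << 20)

    int main(void)
    {
    	uint64_t offset = (uint64_t)2 << 30;	/* userspace reads at 2 GiB */
    	uint64_t chunk_offset = offset % CHUNK_MAX;
    	uint64_t chunk_position = (offset / CHUNK_MAX) * CHUNK_MAX;

    	/* The driver re-renders the dump into its buffer starting at
    	 * chunk_position, then copies from buffer + chunk_offset. */
    	printf("window base %llu, offset in window %llu\n",
    	       (unsigned long long)chunk_position,
    	       (unsigned long long)chunk_offset);
    	return 0;
    }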
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 10fd4601b9f2..c02c4c4e9412 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -5,10 +5,11 @@
#include "xe_device.h"
+#include <linux/aperture.h>
#include <linux/delay.h>
+#include <linux/fault-inject.h>
#include <linux/units.h>
-#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
@@ -22,8 +23,10 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
+#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
+#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
@@ -43,17 +46,23 @@
#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_module.h"
+#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
+#include "xe_pmu.h"
+#include "xe_pxp.h"
#include "xe_query.h"
+#include "xe_shrinker.h"
+#include "xe_survivability_mode.h"
#include "xe_sriov.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vram.h"
+#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
@@ -87,10 +96,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
mutex_init(&xef->exec_queue.lock);
xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
- spin_lock(&xe->clients.lock);
- xe->clients.count++;
- spin_unlock(&xe->clients.lock);
-
file->driver_priv = xef;
kref_init(&xef->refcount);
@@ -107,17 +112,12 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
static void xe_file_destroy(struct kref *ref)
{
struct xe_file *xef = container_of(ref, struct xe_file, refcount);
- struct xe_device *xe = xef->xe;
xa_destroy(&xef->exec_queue.xa);
mutex_destroy(&xef->exec_queue.lock);
xa_destroy(&xef->vm.xa);
mutex_destroy(&xef->vm.lock);
- spin_lock(&xe->clients.lock);
- xe->clients.count--;
- spin_unlock(&xe->clients.lock);
-
xe_drm_client_put(xef->client);
kfree(xef->process_name);
kfree(xef);
@@ -238,12 +238,117 @@ static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned lo
#define xe_drm_compat_ioctl NULL
#endif
+static void barrier_open(struct vm_area_struct *vma)
+{
+ drm_dev_get(vma->vm_private_data);
+}
+
+static void barrier_close(struct vm_area_struct *vma)
+{
+ drm_dev_put(vma->vm_private_data);
+}
+
+static void barrier_release_dummy_page(struct drm_device *dev, void *res)
+{
+ struct page *dummy_page = (struct page *)res;
+
+ __free_page(dummy_page);
+}
+
+static vm_fault_t barrier_fault(struct vm_fault *vmf)
+{
+ struct drm_device *dev = vmf->vma->vm_private_data;
+ struct vm_area_struct *vma = vmf->vma;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ pgprot_t prot;
+ int idx;
+
+ prot = vm_get_page_prot(vma->vm_flags);
+
+ if (drm_dev_enter(dev, &idx)) {
+ unsigned long pfn;
+
+#define LAST_DB_PAGE_OFFSET 0x7ff001
+ pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
+ LAST_DB_PAGE_OFFSET);
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
+ pgprot_noncached(prot));
+ drm_dev_exit(idx);
+ } else {
+ struct page *page;
+
+ /* Allocate new dummy page to map all the VA range in this VMA to it*/
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ /* Set the page to be freed using drmm release action */
+ if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
+ prot);
+ }
+
+ return ret;
+}
+
+static const struct vm_operations_struct vm_ops_barrier = {
+ .open = barrier_open,
+ .close = barrier_close,
+ .fault = barrier_fault,
+};
+
+static int xe_pci_barrier_mmap(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct xe_device *xe = to_xe_device(dev);
+
+ if (!IS_DGFX(xe))
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start > SZ_4K)
+ return -EINVAL;
+
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
+ if (vma->vm_flags & (VM_READ | VM_EXEC))
+ return -EINVAL;
+
+ vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
+ vma->vm_ops = &vm_ops_barrier;
+ vma->vm_private_data = dev;
+ drm_dev_get(vma->vm_private_data);
+
+ return 0;
+}
+
+static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ switch (vma->vm_pgoff) {
+ case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
+ return xe_pci_barrier_mmap(filp, vma);
+ }
+
+ return drm_gem_mmap(filp, vma);
+}
+
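From userspace, the new barrier mapping is reached through the DRM device node at a dedicated fake offset; the offset constant below is a placeholder standing in for the value exported via the xe uAPI headers, and the device path is likewise an assumption. The restrictions enforced in xe_pci_barrier_mmap() translate to a write-only, shared mapping of at most 4 KiB:

    /* Userspace sketch; BARRIER_OFFSET and the node path are placeholders */
    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define BARRIER_OFFSET (0x50ull << 12)	/* placeholder fake offset */

    int main(void)
    {
    	int fd = open("/dev/dri/card0", O_RDWR);
    	if (fd < 0)
    		return 1;

    	/* PROT_WRITE only: VM_READ/VM_EXEC mappings are rejected by the driver */
    	void *bar = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, BARRIER_OFFSET);
    	if (bar == MAP_FAILED) {
    		close(fd);
    		return 1;
    	}

    	munmap(bar, 4096);
    	close(fd);
    	return 0;
    }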
static const struct file_operations xe_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release_noglobal,
.unlocked_ioctl = xe_drm_ioctl,
- .mmap = drm_gem_mmap,
+ .mmap = xe_mmap,
.poll = drm_poll,
.read = drm_read,
.compat_ioctl = xe_drm_compat_ioctl,
@@ -277,7 +382,6 @@ static struct drm_driver driver = {
.fops = &xe_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
@@ -287,6 +391,8 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
struct xe_device *xe = to_xe_device(dev);
+ xe_bo_dev_fini(&xe->bo_device);
+
if (xe->preempt_fence_wq)
destroy_workqueue(xe->preempt_fence_wq);
@@ -296,6 +402,9 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
+ if (!IS_ERR_OR_NULL(xe->mem.shrinker))
+ xe_shrinker_destroy(xe->mem.shrinker);
+
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -310,7 +419,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xe_display_driver_set_hooks(&driver);
- err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver);
+ err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
if (err)
return ERR_PTR(err);
@@ -324,16 +433,22 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (WARN_ON(err))
goto err;
+ xe_bo_dev_init(&xe->bo_device);
err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
if (err)
goto err;
+ xe->mem.shrinker = xe_shrinker_create(xe);
+ if (IS_ERR(xe->mem.shrinker))
+ return ERR_CAST(xe->mem.shrinker);
+
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
- spin_lock_init(&xe->irq.lock);
- spin_lock_init(&xe->clients.lock);
+ err = xe_irq_init(xe);
+ if (err)
+ goto err;
init_waitqueue_head(&xe->ufence_wq);
@@ -354,12 +469,12 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
xa_erase(&xe->usm.asid_to_vm, asid);
}
- spin_lock_init(&xe->pinned.lock);
- INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
- INIT_LIST_HEAD(&xe->pinned.external_vram);
- INIT_LIST_HEAD(&xe->pinned.evicted);
+ err = xe_bo_pinned_init(xe);
+ if (err)
+ goto err;
- xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq", 0);
+ xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
+ WQ_MEM_RECLAIM);
xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
@@ -374,6 +489,10 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
goto err;
}
+ err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
+ if (err)
+ goto err;
+
err = xe_display_create(xe);
if (WARN_ON(err))
goto err;
@@ -383,6 +502,20 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
err:
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
+
+static bool xe_driver_flr_disabled(struct xe_device *xe)
+{
+ if (IS_SRIOV_VF(xe))
+ return true;
+
+ if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
+ drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
+ return true;
+ }
+
+ return false;
+}
/*
* The driver-initiated FLR is the highest level of reset that we can trigger
@@ -397,17 +530,12 @@ err:
* if/when a new instance of i915 is bound to the device it will do a full
* re-init anyway.
*/
-static void xe_driver_flr(struct xe_device *xe)
+static void __xe_driver_flr(struct xe_device *xe)
{
- const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
int ret;
- if (xe_mmio_read32(gt, GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
- drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n");
- return;
- }
-
drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
/*
@@ -419,25 +547,25 @@ static void xe_driver_flr(struct xe_device *xe)
* is still pending (unless the HW is totally dead), but better to be
* safe in case something unexpected happens
*/
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
return;
}
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
/* Trigger the actual Driver-FLR */
- xe_mmio_rmw32(gt, GU_CNTL, 0, DRIVERFLR);
+ xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);
/* Wait for hardware teardown to complete */
- ret = xe_mmio_wait32(gt, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
+ ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
return;
}
/* Wait for hardware/firmware re-init to complete */
- ret = xe_mmio_wait32(gt, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
+ ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
flr_timeout, NULL, false);
if (ret) {
drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
@@ -445,7 +573,15 @@ static void xe_driver_flr(struct xe_device *xe)
}
/* Clear sticky completion status */
- xe_mmio_write32(gt, GU_DEBUG, DRIVERFLR_STATUS);
+ xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
+}
+
+static void xe_driver_flr(struct xe_device *xe)
+{
+ if (xe_driver_flr_disabled(xe))
+ return;
+
+ __xe_driver_flr(xe);
}
static void xe_driver_flr_fini(void *arg)
@@ -488,16 +624,15 @@ mask_err:
return err;
}
-static bool verify_lmem_ready(struct xe_gt *gt)
+static bool verify_lmem_ready(struct xe_device *xe)
{
- u32 val = xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT;
+ u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;
return !!val;
}
static int wait_for_lmem_ready(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
unsigned long timeout, start;
if (!IS_DGFX(xe))
@@ -506,13 +641,13 @@ static int wait_for_lmem_ready(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return 0;
- if (verify_lmem_ready(gt))
+ if (verify_lmem_ready(xe))
return 0;
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
start = jiffies;
- timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
+ timeout = start + secs_to_jiffies(60); /* 60 sec! */
do {
if (signal_pending(current))
@@ -535,15 +670,16 @@ static int wait_for_lmem_ready(struct xe_device *xe)
msleep(20);
- } while (!verify_lmem_ready(gt));
+ } while (!verify_lmem_ready(xe));
drm_dbg(&xe->drm, "lmem ready after %ums",
jiffies_to_msecs(jiffies - start));
return 0;
}
+ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
-static void update_device_info(struct xe_device *xe)
+static void sriov_update_device_info(struct xe_device *xe)
{
/* disable features that are not available/applicable to VFs */
if (IS_SRIOV_VF(xe)) {
@@ -568,17 +704,29 @@ int xe_device_probe_early(struct xe_device *xe)
{
int err;
- err = xe_mmio_init(xe);
+ err = xe_mmio_probe_early(xe);
if (err)
return err;
xe_sriov_probe_early(xe);
- update_device_info(xe);
+ sriov_update_device_info(xe);
err = xe_pcode_probe_early(xe);
- if (err)
- return err;
+ if (err || xe_survivability_mode_is_requested(xe)) {
+ int save_err = err;
+
+ /*
+ * Try to leave the device in survivability mode if possible, but
+ * still return the previous error for error propagation
+ */
+ err = xe_survivability_mode_enable(xe);
+ if (err)
+ return err;
+
+ return save_err;
+ }
err = wait_for_lmem_ready(xe);
if (err)
@@ -588,20 +736,23 @@ int xe_device_probe_early(struct xe_device *xe)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
-static int xe_device_set_has_flat_ccs(struct xe_device *xe)
+static int probe_has_flat_ccs(struct xe_device *xe)
{
+ struct xe_gt *gt;
+ unsigned int fw_ref;
u32 reg;
- int err;
- if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs)
+ /* Always enabled/disabled, no runtime check to do */
+ if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
return 0;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ gt = xe_root_mmio_gt(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- return err;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);
@@ -610,7 +761,9 @@ static int xe_device_set_has_flat_ccs(struct xe_device *xe)
drm_dbg(&xe->drm,
"Flat CCS has been disabled in bios, May lead to performance impact");
- return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return 0;
}
int xe_device_probe(struct xe_device *xe)
@@ -618,7 +771,6 @@ int xe_device_probe(struct xe_device *xe)
struct xe_tile *tile;
struct xe_gt *gt;
int err;
- u8 last_gt;
u8 id;
xe_pat_init_early(xe);
@@ -628,9 +780,6 @@ int xe_device_probe(struct xe_device *xe)
return err;
xe->info.mem_region_mask = 1;
- err = xe_display_init_nommio(xe);
- if (err)
- return err;
err = xe_set_dma_info(xe);
if (err)
@@ -640,12 +789,21 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- xe_ttm_sys_mgr_init(xe);
+ err = xe_ttm_sys_mgr_init(xe);
+ if (err)
+ return err;
for_each_gt(gt, xe, id) {
err = xe_gt_init_early(gt);
if (err)
return err;
+
+ /*
+ * Only after this point can GT-specific MMIO operations
+ * (including things like communication with the GuC)
+ * be performed.
+ */
+ xe_gt_mmio_init(gt);
}
for_each_tile(tile, xe, id) {
@@ -661,11 +819,9 @@ int xe_device_probe(struct xe_device *xe)
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
- if (IS_SRIOV_VF(xe)) {
- err = xe_memirq_init(&tile->sriov.vf.memirq);
- if (err)
- return err;
- }
+ err = xe_memirq_init(&tile->memirq);
+ if (err)
+ return err;
}
for_each_gt(gt, xe, id) {
@@ -677,34 +833,32 @@ int xe_device_probe(struct xe_device *xe)
err = xe_devcoredump_init(xe);
if (err)
return err;
+
+ /*
+ * From here on, if a step fails, make sure a Driver-FLR is triggered
+ */
err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
if (err)
return err;
- err = xe_display_init_noirq(xe);
+ err = probe_has_flat_ccs(xe);
if (err)
return err;
- err = xe_irq_install(xe);
- if (err)
- goto err;
-
- err = xe_device_set_has_flat_ccs(xe);
- if (err)
- goto err;
-
err = xe_vram_probe(xe);
if (err)
- goto err;
+ return err;
for_each_tile(tile, xe, id) {
err = xe_tile_init_noalloc(tile);
if (err)
- goto err;
+ return err;
}
/* Allocate and map stolen after potential VRAM resize */
- xe_ttm_stolen_mgr_init(xe);
+ err = xe_ttm_stolen_mgr_init(xe);
+ if (err)
+ return err;
/*
* Now that GT is initialized (TTM in particular),
@@ -712,93 +866,108 @@ int xe_device_probe(struct xe_device *xe)
* This is the reason the first allocation needs to be done
* inside display.
*/
- err = xe_display_init_noaccel(xe);
+ err = xe_display_init_early(xe);
if (err)
- goto err;
+ return err;
- for_each_gt(gt, xe, id) {
- last_gt = id;
+ for_each_tile(tile, xe, id) {
+ err = xe_tile_init(tile);
+ if (err)
+ return err;
+ }
+ err = xe_irq_install(xe);
+ if (err)
+ return err;
+
+ for_each_gt(gt, xe, id) {
err = xe_gt_init(gt);
if (err)
- goto err_fini_gt;
+ return err;
}
- xe_heci_gsc_init(xe);
+ err = xe_heci_gsc_init(xe);
+ if (err)
+ return err;
err = xe_oa_init(xe);
if (err)
- goto err_fini_gt;
+ return err;
err = xe_display_init(xe);
if (err)
- goto err_fini_oa;
+ return err;
+
+ err = xe_pxp_init(xe);
+ if (err)
+ return err;
err = drm_dev_register(&xe->drm, 0);
if (err)
- goto err_fini_display;
+ return err;
xe_display_register(xe);
- xe_oa_register(xe);
+ err = xe_oa_register(xe);
+ if (err)
+ goto err_unregister_display;
+
+ err = xe_pmu_register(&xe->pmu);
+ if (err)
+ goto err_unregister_display;
+
+ err = xe_device_sysfs_init(xe);
+ if (err)
+ goto err_unregister_display;
xe_debugfs_register(xe);
- xe_hwmon_register(xe);
+ err = xe_hwmon_register(xe);
+ if (err)
+ goto err_unregister_display;
for_each_gt(gt, xe, id)
xe_gt_sanitize_freq(gt);
- return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
+ xe_vsec_init(xe);
-err_fini_display:
- xe_display_driver_remove(xe);
-
-err_fini_oa:
- xe_oa_fini(xe);
+ return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
-err_fini_gt:
- for_each_gt(gt, xe, id) {
- if (id < last_gt)
- xe_gt_remove(gt);
- else
- break;
- }
+err_unregister_display:
+ xe_display_unregister(xe);
-err:
- xe_display_fini(xe);
return err;
}
-static void xe_device_remove_display(struct xe_device *xe)
+void xe_device_remove(struct xe_device *xe)
{
xe_display_unregister(xe);
drm_dev_unplug(&xe->drm);
- xe_display_driver_remove(xe);
+
+ xe_bo_pci_dev_remove_all(xe);
}
-void xe_device_remove(struct xe_device *xe)
+void xe_device_shutdown(struct xe_device *xe)
{
struct xe_gt *gt;
u8 id;
- xe_oa_unregister(xe);
+ drm_dbg(&xe->drm, "Shutting down device\n");
- xe_device_remove_display(xe);
+ if (xe_driver_flr_disabled(xe)) {
+ xe_display_pm_shutdown(xe);
- xe_display_fini(xe);
+ xe_irq_suspend(xe);
- xe_oa_fini(xe);
+ for_each_gt(gt, xe, id)
+ xe_gt_shutdown(gt);
- xe_heci_gsc_fini(xe);
-
- for_each_gt(gt, xe, id)
- xe_gt_remove(gt);
-}
-
-void xe_device_shutdown(struct xe_device *xe)
-{
+ xe_display_pm_shutdown_late(xe);
+ } else {
+ /* BOOM! */
+ __xe_driver_flr(xe);
+ }
}
/**
@@ -812,11 +981,9 @@ void xe_device_shutdown(struct xe_device *xe)
*/
void xe_device_wmb(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
wmb();
if (IS_DGFX(xe))
- xe_mmio_write32(gt, VF_CAP_REG, 0);
+ xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}
/**
@@ -840,6 +1007,7 @@ void xe_device_wmb(struct xe_device *xe)
void xe_device_td_flush(struct xe_device *xe)
{
struct xe_gt *gt;
+ unsigned int fw_ref;
u8 id;
if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
@@ -854,10 +1022,11 @@ void xe_device_td_flush(struct xe_device *xe)
if (xe_gt_is_media_type(gt))
continue;
- if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
- xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+ xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
/*
* FIXME: We can likely do better here with our choice of
* timeout. Currently we just assume the worst case, i.e. 150us,
@@ -865,36 +1034,36 @@ void xe_device_td_flush(struct xe_device *xe)
* scenario on current platforms if all cache entries are
* transient and need to be flushed..
*/
- if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+ if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
150, NULL, false))
xe_gt_err_once(gt, "TD flush timeout\n");
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
}
void xe_device_l2_flush(struct xe_device *xe)
{
struct xe_gt *gt;
- int err;
+ unsigned int fw_ref;
gt = xe_root_mmio_gt(xe);
if (!XE_WA(gt, 16023588340))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
spin_lock(&gt->global_invl_lock);
- xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+ xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
- if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
+ if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
@@ -929,6 +1098,7 @@ void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
for_each_gt(gt, xe, id) {
drm_printf(p, "GT id: %u\n", id);
+ drm_printf(p, "\tTile: %u\n", gt->tile->id);
drm_printf(p, "\tType: %s\n",
gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
drm_printf(p, "\tIP ver: %u.%u.%u\n",
@@ -960,11 +1130,12 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
*
- * This is a final state that can only be cleared with a mudule
+ * This is a final state that can only be cleared with a module
* re-probe (unbind + bind).
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
- * failure or guc loading failure.
+ * failure or guc loading failure. Userspace will be notified of this state
+ * through device wedged uevent.
* If xe.wedged module parameter is set to 2, this function will be called
* on every single execution timeout (a.k.a. GPU hang) right after devcoredump
* snapshot capture. In this mode, GT reset won't be attempted so the state of
@@ -994,6 +1165,10 @@ void xe_device_declare_wedged(struct xe_device *xe)
"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
+
+ /* Notify userspace of wedged device */
+ drm_dev_wedged_event(&xe->drm,
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
}
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 894f04770454..0bc3bc8e6803 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -9,6 +9,8 @@
#include <drm/drm_util.h>
#include "xe_device_types.h"
+#include "xe_gt_types.h"
+#include "xe_sriov.h"
static inline struct xe_device *to_xe_device(const struct drm_device *dev)
{
@@ -138,7 +140,7 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe)
static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
{
- return &gt->mmio.fw;
+ return &gt->pm.fw;
}
void xe_device_assert_mem_access(struct xe_device *xe);
@@ -153,11 +155,26 @@ static inline bool xe_device_has_sriov(struct xe_device *xe)
return xe->info.has_sriov;
}
+static inline bool xe_device_has_msix(struct xe_device *xe)
+{
+ return xe->irq.msix.nvec > 0;
+}
+
static inline bool xe_device_has_memirq(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) >= 1250;
}
+static inline bool xe_device_uses_memirq(struct xe_device *xe)
+{
+ return xe_device_has_memirq(xe) && (IS_SRIOV_VF(xe) || xe_device_has_msix(xe));
+}
+
+static inline bool xe_device_has_lmtt(struct xe_device *xe)
+{
+ return IS_DGFX(xe);
+}
+
u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size);
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p);
@@ -178,4 +195,18 @@ void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+/*
+ * Occasionally it is seen that the G2H worker starts running after a delay of more than
+ * a second even after being queued and activated by the Linux workqueue subsystem. This
+ * leads to a G2H timeout error. The root cause lies with the scheduling latency of the
+ * Lunarlake Hybrid CPU. The issue disappears if the Lunarlake atom cores are disabled in
+ * the BIOS, but that is beyond the control of the xe KMD.
+ *
+ * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+ */
+#define LNL_FLUSH_WORKQUEUE(wq__) \
+ flush_workqueue(wq__)
+#define LNL_FLUSH_WORK(wrk__) \
+ flush_work(wrk__)
+
#endif
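A sketch of how these helpers are meant to be used at a timeout site: flush the suspect worker once before declaring the response lost, so a late-scheduled worker on LNL does not turn into a spurious timeout. The wait queue, fence and worker names below are illustrative only and are not defined by this hunk:

    /* Illustrative timeout path, names assumed */
    ret = wait_event_timeout(ct->wq, g2h_fence.done, HZ);
    if (!ret) {
    	/* LNL workaround: the worker may simply not have run yet */
    	LNL_FLUSH_WORK(&ct->g2h_worker);
    	if (g2h_fence.done)
    		ret = 1;	/* response arrived late, not actually lost */
    }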
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index 7375937934fa..b9440f8c781e 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -3,14 +3,16 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
-#include <drm/drm_managed.h>
-
#include "xe_device.h"
#include "xe_device_sysfs.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+#include "xe_pcode.h"
#include "xe_pm.h"
/**
@@ -32,9 +34,6 @@ vram_d3cold_threshold_show(struct device *dev,
struct xe_device *xe = pdev_to_xe_device(pdev);
int ret;
- if (!xe)
- return -EINVAL;
-
xe_pm_runtime_get(xe);
ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
xe_pm_runtime_put(xe);
@@ -51,9 +50,6 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
u32 vram_d3cold_threshold;
int ret;
- if (!xe)
- return -EINVAL;
-
ret = kstrtou32(buff, 0, &vram_d3cold_threshold);
if (ret)
return ret;
@@ -69,11 +65,94 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+/**
+ * DOC: PCIe Gen5 Limitations
+ *
+ * Default link speed of discrete GPUs is determined by configuration parameters
+ * stored in their flash memory, which are subject to override through user
+ * initiated firmware updates. It has been observed that devices configured with
+ * PCIe Gen5 as their default link speed can come across link quality issues due
+ * to host or motherboard limitations and may have to auto-downgrade their link
+ * to PCIe Gen4 speed when faced with unstable link at Gen5, which makes
+ * firmware updates rather risky on such setups. It is required to ensure that
+ * the device is capable of auto-downgrading its link to PCIe Gen4 speed before
+ * pushing the firmware image with PCIe Gen5 as default configuration. This can
+ * be done by reading ``auto_link_downgrade_capable`` sysfs entry, which will
+ * denote if the device is capable of auto-downgrading its link to PCIe Gen4
+ * speed with boolean output value of ``0`` or ``1``, meaning `incapable` or
+ * `capable` respectively.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_capable
+ *
+ * Pushing the firmware image with PCIe Gen5 as default configuration on an auto
+ * link downgrade incapable device and facing link instability due to host or
+ * motherboard limitations can result in driver failing to bind to the device,
+ * making further firmware updates impossible with RMA being the only last
+ * resort.
+ *
+ * Link downgrade status of auto link downgrade capable devices is available
+ * through ``auto_link_downgrade_status`` sysfs entry with boolean output value
+ * of ``0`` or ``1``, where ``0`` means no auto-downgrading was required during
+ * link training (which is the optimal scenario) and ``1`` means the device has
+ * auto-downgraded its link to PCIe Gen4 speed due to unstable Gen5 link.
+ *
+ * .. code-block:: shell
+ *
+ * $ cat /sys/bus/pci/devices/<bdf>/auto_link_downgrade_status
+ */
+
+static ssize_t
+auto_link_downgrade_capable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ u32 cap, val;
+
+ xe_pm_runtime_get(xe);
+ val = xe_mmio_read32(xe_root_tile_mmio(xe), BMG_PCIE_CAP);
+ xe_pm_runtime_put(xe);
+
+ cap = REG_FIELD_GET(LINK_DOWNGRADE, val);
+ return sysfs_emit(buf, "%u\n", cap == DOWNGRADE_CAPABLE);
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_capable);
+
+static ssize_t
+auto_link_downgrade_status_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ /* default the auto_link_downgrade status to 0 */
+ u32 val = 0;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_pcode_read(xe_device_get_root_tile(xe),
+ PCODE_MBOX(DGFX_PCODE_STATUS, DGFX_GET_INIT_STATUS, 0),
+ &val, NULL);
+ xe_pm_runtime_put(xe);
+
+ return ret ?: sysfs_emit(buf, "%u\n", REG_FIELD_GET(DGFX_LINK_DOWNGRADE_STATUS, val));
+}
+static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
+
+static const struct attribute *auto_link_downgrade_attrs[] = {
+ &dev_attr_auto_link_downgrade_capable.attr,
+ &dev_attr_auto_link_downgrade_status.attr,
+ NULL
+};
+
static void xe_device_sysfs_fini(void *arg)
{
struct xe_device *xe = arg;
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (xe->d3cold.capable)
+ sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+
+ if (xe->info.platform == XE_BATTLEMAGE)
+ sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
}
int xe_device_sysfs_init(struct xe_device *xe)
@@ -81,9 +160,17 @@ int xe_device_sysfs_init(struct xe_device *xe)
struct device *dev = xe->drm.dev;
int ret;
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
- if (ret)
- return ret;
+ if (xe->d3cold.capable) {
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ if (ret)
+ return ret;
+ }
+
+ if (xe->info.platform == XE_BATTLEMAGE) {
+ ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ if (ret)
+ return ret;
+ }
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
}
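To illustrate how the two new Battlemage attributes documented above are meant to be consumed together before pushing a Gen5-default firmware image, here is a standalone userspace sketch; the PCI BDF in the path is a placeholder for the real device address:

    #include <stdio.h>

    /* Placeholder BDF; substitute the real device address */
    #define SYSFS_DEV "/sys/bus/pci/devices/0000:03:00.0/"

    static int read_flag(const char *name)
    {
    	char path[256];
    	int val = -1;
    	FILE *f;

    	snprintf(path, sizeof(path), SYSFS_DEV "%s", name);
    	f = fopen(path, "r");
    	if (!f)
    		return -1;
    	if (fscanf(f, "%d", &val) != 1)
    		val = -1;
    	fclose(f);
    	return val;
    }

    int main(void)
    {
    	int capable = read_flag("auto_link_downgrade_capable");
    	int status = read_flag("auto_link_downgrade_status");

    	printf("capable=%d status=%d\n", capable, status);
    	/* Only push a Gen5-default image when the device reports capable == 1 */
    	return capable == 1 ? 0 : 1;
    }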
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 09d731a9125c..6383a1c0d478 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,31 +10,34 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>
#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
-#include "xe_gt_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_platform_types.h"
+#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_types.h"
#include "xe_step_types.h"
+#include "xe_survivability_mode_types.h"
+#include "xe_ttm_vram_mgr_types.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
#endif
#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-#include "soc/intel_pch.h"
#include "intel_display_core.h"
#include "intel_display_device.h"
#endif
struct xe_ggtt;
struct xe_pat_ops;
+struct xe_pxp;
#define XE_BO_INVALID_OFFSET LONG_MAX
@@ -43,8 +46,6 @@ struct xe_pat_ops;
#define GRAPHICS_VERx100(xe) ((xe)->info.graphics_verx100)
#define MEDIA_VERx100(xe) ((xe)->info.media_verx100)
#define IS_DGFX(xe) ((xe)->info.is_dgfx)
-#define HAS_HECI_GSCFI(xe) ((xe)->info.has_heci_gscfi)
-#define HAS_HECI_CSCFI(xe) ((xe)->info.has_heci_cscfi)
#define XE_VRAM_FLAGS_NEED64K BIT(0)
@@ -70,11 +71,11 @@ struct xe_pat_ops;
struct xe_tile * : (tile__)->xe)
/**
- * struct xe_mem_region - memory region structure
+ * struct xe_vram_region - memory region structure
* This is used to describe a memory region in xe
* device, such as HBM memory or CXL extension memory.
*/
-struct xe_mem_region {
+struct xe_vram_region {
/** @io_start: IO start address of this VRAM instance */
resource_size_t io_start;
/**
@@ -105,6 +106,62 @@ struct xe_mem_region {
resource_size_t actual_physical_size;
/** @mapping: pointer to VRAM mappable space */
void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+ * pages of this tile.
+ */
+ struct drm_pagemap dpagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remap device memory as ZONE_DEVICE
+ */
+ resource_size_t hpa_base;
+#endif
+};
+
+/**
+ * struct xe_mmio - register mmio structure
+ *
+ * Represents an MMIO region that the CPU may use to access registers. A
+ * region may share its IO map with other regions (e.g., all GTs within a
+ * tile share the same map with their parent tile, but represent different
+ * subregions of the overall IO space).
+ */
+struct xe_mmio {
+ /** @tile: Backpointer to tile, used for tracing */
+ struct xe_tile *tile;
+
+ /** @regs: Map used to access registers. */
+ void __iomem *regs;
+
+ /**
+ * @sriov_vf_gt: Backpointer to GT.
+ *
+ * This pointer is only set for GT MMIO regions and only when running
+ * as an SRIOV VF structure
+ */
+ struct xe_gt *sriov_vf_gt;
+
+ /**
+ * @regs_size: Length of the register region within the map.
+ *
+ * The size of the iomap set in *regs is generally larger than the
+ * register mmio space since it includes unused regions and/or
+ * non-register regions such as the GGTT PTEs.
+ */
+ size_t regs_size;
+
+ /** @adj_limit: adjust MMIO address if address is below this value */
+ u32 adj_limit;
+
+ /** @adj_offset: offset to add to MMIO address when adjusting */
+ u32 adj_offset;
};
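An illustrative sketch, not part of this patch, of how the two adjustment fields above are intended to combine when forming the final register offset for an access through this region:

    /* Illustrative only: apply the per-region address adjustment */
    static u32 example_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
    {
    	if (addr < mmio->adj_limit)
    		addr += mmio->adj_offset;
    	return addr;
    }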
/**
@@ -148,26 +205,7 @@ struct xe_tile {
* * 4MB-8MB: reserved
* * 8MB-16MB: global GTT
*/
- struct {
- /** @mmio.size: size of tile's MMIO space */
- size_t size;
-
- /** @mmio.regs: pointer to tile's MMIO space (starting with registers) */
- void __iomem *regs;
- } mmio;
-
- /**
- * @mmio_ext: MMIO-extension info for a tile.
- *
- * Each tile has its own additional 256MB (28-bit) MMIO-extension space.
- */
- struct {
- /** @mmio_ext.size: size of tile's additional MMIO-extension space */
- size_t size;
-
- /** @mmio_ext.regs: pointer to tile's additional MMIO-extension space */
- void __iomem *regs;
- } mmio_ext;
+ struct xe_mmio mmio;
/** @mem: memory management info for tile */
struct {
@@ -177,10 +215,7 @@ struct xe_tile {
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
- struct xe_mem_region vram;
-
- /** @mem.vram_mgr: VRAM TTM manager */
- struct xe_ttm_vram_mgr *vram_mgr;
+ struct xe_vram_region vram;
/** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
@@ -200,14 +235,14 @@ struct xe_tile {
struct xe_lmtt lmtt;
} pf;
struct {
- /** @sriov.vf.memirq: Memory Based Interrupts. */
- struct xe_memirq memirq;
-
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
struct xe_ggtt_node *ggtt_balloon[2];
} vf;
} sriov;
+ /** @memirq: Memory Based Interrupts. */
+ struct xe_memirq memirq;
+
/** @pcode: tile's PCODE */
struct {
/** @pcode.lock: protecting tile's PCODE mailbox data */
@@ -239,8 +274,6 @@ struct xe_device {
const char *graphics_name;
/** @info.media_name: media IP name */
const char *media_name;
- /** @info.tile_mmio_ext_size: size of MMIO extension space, per-tile */
- u32 tile_mmio_ext_size;
/** @info.graphics_verx100: graphics IP version */
u32 graphics_verx100;
/** @info.media_verx100: media IP version */
@@ -270,24 +303,46 @@ struct xe_device {
/** @info.va_bits: Maximum bits of a virtual address */
u8 va_bits;
- /** @info.is_dgfx: is discrete device */
- u8 is_dgfx:1;
- /** @info.has_asid: Has address space ID */
- u8 has_asid:1;
+ /*
+ * Keep all flags below alphabetically sorted
+ */
+
/** @info.force_execlist: Forced execlist submission */
u8 force_execlist:1;
+ /** @info.has_asid: Has address space ID */
+ u8 has_asid:1;
+ /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
+ u8 has_atomic_enable_pte_bit:1;
+ /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
+ u8 has_device_atomics_on_smem:1;
+ /** @info.has_fan_control: Device supports fan control */
+ u8 has_fan_control:1;
/** @info.has_flat_ccs: Whether flat CCS metadata is used */
u8 has_flat_ccs:1;
+ /** @info.has_heci_cscfi: device has heci cscfi */
+ u8 has_heci_cscfi:1;
+ /** @info.has_heci_gscfi: device has heci gscfi */
+ u8 has_heci_gscfi:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
- /** @info.has_mmio_ext: Device has extra MMIO address range */
- u8 has_mmio_ext:1;
+ /** @info.has_mbx_power_limits: Device has support to manage power limits using
+ * pcode mailbox commands.
+ */
+ u8 has_mbx_power_limits:1;
+ /** @info.has_pxp: Device has PXP support */
+ u8 has_pxp:1;
/** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
u8 has_range_tlb_invalidation:1;
/** @info.has_sriov: Supports SR-IOV */
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
u8 has_usm:1;
+ /** @info.has_64bit_timestamp: Device supports 64-bit timestamps */
+ u8 has_64bit_timestamp:1;
+ /** @info.is_dgfx: is discrete device */
+ u8 is_dgfx:1;
+ /** @info.needs_scratch: needs scratch page for oob prefetch to work */
+ u8 needs_scratch:1;
/**
* @info.probe_display: Probe display hardware. If set to
* false, the driver will behave as if there is no display
@@ -297,29 +352,32 @@ struct xe_device {
* state the firmware or bootloader left it in.
*/
u8 probe_display:1;
+ /** @info.skip_guc_pc: Skip GuC based PM feature init */
+ u8 skip_guc_pc:1;
/** @info.skip_mtcfg: skip Multi-Tile configuration from MTCFG register */
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
- /** @info.has_heci_gscfi: device has heci gscfi */
- u8 has_heci_gscfi:1;
- /** @info.has_heci_cscfi: device has heci cscfi */
- u8 has_heci_cscfi:1;
- /** @info.skip_guc_pc: Skip GuC based PM feature init */
- u8 skip_guc_pc:1;
- /** @info.has_atomic_enable_pte_bit: Device has atomic enable PTE bit */
- u8 has_atomic_enable_pte_bit:1;
- /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */
- u8 has_device_atomics_on_smem:1;
} info;
+ /** @survivability: survivability information for device */
+ struct xe_survivability survivability;
+
/** @irq: device interrupt state */
struct {
/** @irq.lock: lock for processing irq's on this device */
spinlock_t lock;
/** @irq.enabled: interrupts enabled on this device */
- bool enabled;
+ atomic_t enabled;
+
+ /** @irq.msix: irq info for platforms that support MSI-X */
+ struct {
+ /** @irq.msix.nvec: number of MSI-X interrupts */
+ u16 nvec;
+ /** @irq.msix.indexes: used to allocate MSI-X indexes */
+ struct xarray indexes;
+ } msix;
} irq;
/** @ttm: ttm device */
@@ -336,9 +394,11 @@ struct xe_device {
/** @mem: memory info for device */
struct {
/** @mem.vram: VRAM info for device */
- struct xe_mem_region vram;
+ struct xe_vram_region vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
+ /** @mem.shrinker: system memory shrinker. */
+ struct xe_shrinker *shrinker;
} mem;
/** @sriov: device level virtualization data */
@@ -348,20 +408,13 @@ struct xe_device {
/** @sriov.pf: PF specific data */
struct xe_device_pf pf;
+ /** @sriov.vf: VF specific data */
+ struct xe_device_vf vf;
/** @sriov.wq: workqueue used by the virtualization workers */
struct workqueue_struct *wq;
} sriov;
- /** @clients: drm clients info */
- struct {
- /** @clients.lock: Protects drm clients info */
- spinlock_t lock;
-
- /** @clients.count: number of drm clients */
- u64 count;
- } clients;
-
/** @usm: unified memory state */
struct {
/** @usm.asid: convert a ASID to VM */
@@ -376,12 +429,22 @@ struct xe_device {
struct {
/** @pinned.lock: protected pinned BO list state */
spinlock_t lock;
- /** @pinned.kernel_bo_present: pinned kernel BO that are present */
- struct list_head kernel_bo_present;
- /** @pinned.evicted: pinned BO that have been evicted */
- struct list_head evicted;
- /** @pinned.external_vram: pinned external BO in vram*/
- struct list_head external_vram;
+ /** @pinned.early: early pinned lists */
+ struct {
+ /** @pinned.early.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.early.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ } early;
+ /** @pinned.late: late pinned lists */
+ struct {
+ /** @pinned.late.kernel_bo_present: pinned kernel BO that are present */
+ struct list_head kernel_bo_present;
+ /** @pinned.late.evicted: pinned BO that have been evicted */
+ struct list_head evicted;
+ /** @pinned.external: pinned external and dma-buf. */
+ struct list_head external;
+ } late;
} pinned;
/** @ufence_wq: user fence wait queue */
@@ -464,6 +527,15 @@ struct xe_device {
struct mutex lock;
} d3cold;
+ /** @pm_notifier: Our PM notifier to perform actions in response to various PM events. */
+ struct notifier_block pm_notifier;
+
+ /** @pmt: Support the PMT driver callback interface */
+ struct {
+ /** @pmt.lock: protect access for telemetry data */
+ struct mutex lock;
+ } pmt;
+
/**
* @pm_callback_task: Track the active task that is running in either
* the runtime_suspend or runtime_resume callbacks.
@@ -479,6 +551,9 @@ struct xe_device {
/** @oa: oa observation subsystem */
struct xe_oa oa;
+ /** @pxp: Encapsulate Protected Xe Path support */
+ struct xe_pxp *pxp;
+
/** @needs_flr_on_fini: requests function-reset on fini */
bool needs_flr_on_fini;
@@ -490,6 +565,17 @@ struct xe_device {
int mode;
} wedged;
+ /** @bo_device: Struct to control async free of BOs */
+ struct xe_bo_dev {
+ /** @bo_device.async_free: Free worker */
+ struct work_struct async_free;
+ /** @bo_device.async_list: List of BOs to be freed */
+ struct llist_head async_list;
+ } bo_device;
+
+ /** @pmu: performance monitoring unit */
+ struct xe_pmu pmu;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
@@ -508,8 +594,6 @@ struct xe_device {
* migrating to the right sub-structs
*/
struct intel_display display;
- enum intel_pch pch_type;
- u16 pch_id;
struct dram_info {
bool wm_lv_0_adjust_needed;
@@ -524,6 +608,8 @@ struct xe_device {
INTEL_DRAM_DDR5,
INTEL_DRAM_LPDDR5,
INTEL_DRAM_GDDR,
+ INTEL_DRAM_GDDR_ECC,
+ __INTEL_DRAM_TYPE_MAX,
} type;
u8 num_qgv_points;
u8 num_psf_gv_points;
@@ -551,8 +637,6 @@ struct xe_device {
unsigned int czclk_freq;
unsigned int fsb_freq, mem_freq, is_ddr3;
};
-
- void *pxp;
#endif
};
@@ -571,7 +655,7 @@ struct xe_file {
 /** @vm.xa: xarray to store VMs */
struct xarray xa;
/**
- * @vm.lock: Protects VM lookup + reference and removal a from
+ * @vm.lock: Protects VM lookup + reference and removal from
 * file xarray. Not intended to be an outer lock which does
 * things while being held.
*/
@@ -584,10 +668,15 @@ struct xe_file {
struct xarray xa;
/**
* @exec_queue.lock: Protects exec queue lookup + reference and
- * removal a frommfile xarray. Not an intended to be an outer
- * lock which does thing while being held.
+ * removal from file xarray. Not intended to be an outer lock
+ * which does things while being held.
*/
struct mutex lock;
+ /**
+ * @exec_queue.pending_removal: items pending to be removed to
+ * synchronize GPU state update with ongoing query.
+ */
+ atomic_t pending_removal;
} exec_queue;
/** @run_ticks: hw engine class run time in ticks for this drm client */
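The new @exec_queue.pending_removal counter is consumed later in this patch by wait_var_event() in show_run_ticks() and signalled from xe_exec_queue_fini()/xe_exec_queue_destroy_ioctl(). As a reference, a minimal sketch of that generic wait/wake idiom, with illustrative names that are not part of the patch:

    /* Sketch only: illustrative names, same atomic_t + wait_var_event() idiom. */
    #include <linux/atomic.h>
    #include <linux/wait_bit.h>

    static atomic_t pending;		/* e.g. exec queues pending removal */

    static void removal_start(void)
    {
            atomic_inc(&pending);
    }

    static void removal_done(void)
    {
            /* Wake waiters only once the last pending removal has completed. */
            if (atomic_dec_and_test(&pending))
                    wake_up_var(&pending);
    }

    static void query_wait_for_removals(void)
    {
            /* Sleep until every pending removal has been accounted. */
            wait_var_event(&pending, !atomic_read(&pending));
    }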
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index 68f309f5e981..346f857f3837 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -20,7 +20,7 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
-MODULE_IMPORT_NS(DMA_BUF);
+MODULE_IMPORT_NS("DMA_BUF");
static int xe_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach)
@@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
* 1) Avoid pinning in a placement not accessible to some importers.
* 2) Pinning in VRAM requires PIN accounting which is a to-do.
*/
- if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
@@ -145,10 +145,7 @@ static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct dma_buf *dma_buf = attach->dmabuf;
- struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);
-
- if (!xe_bo_is_vram(bo)) {
+ if (sg_page(sgt->sgl)) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
@@ -236,7 +233,7 @@ static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
- XE_WARN_ON(xe_bo_evict(bo, false));
+ XE_WARN_ON(xe_bo_evict(bo));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index fb52a23e28f8..31f688e953d7 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -135,8 +135,8 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
XE_WARN_ON(bo->client);
XE_WARN_ON(!list_empty(&bo->client_link));
- spin_lock(&client->bos_lock);
bo->client = xe_drm_client_get(client);
+ spin_lock(&client->bos_lock);
list_add_tail(&bo->client_link, &client->bos_list);
spin_unlock(&client->bos_lock);
}
@@ -261,6 +261,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
if (man) {
drm_print_memory_stats(p,
&stats[mem_type],
+ DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
(mem_type != XE_PL_SYSTEM ? 0 :
DRM_GEM_OBJECT_PURGEABLE),
@@ -269,6 +270,49 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
}
}
+static struct xe_hw_engine *any_engine(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned long gt_id;
+
+ for_each_gt(gt, xe, gt_id) {
+ struct xe_hw_engine *hwe = xe_gt_any_hw_engine(gt);
+
+ if (hwe)
+ return hwe;
+ }
+
+ return NULL;
+}
+
+static bool force_wake_get_any_engine(struct xe_device *xe,
+ struct xe_hw_engine **phwe,
+ unsigned int *pfw_ref)
+{
+ enum xe_force_wake_domains domain;
+ unsigned int fw_ref;
+ struct xe_hw_engine *hwe;
+ struct xe_force_wake *fw;
+
+ hwe = any_engine(xe);
+ if (!hwe)
+ return false;
+
+ domain = xe_hw_engine_to_fw_domain(hwe);
+ fw = gt_to_fw(hwe->gt);
+
+ fw_ref = xe_force_wake_get(fw, domain);
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain)) {
+ xe_force_wake_put(fw, fw_ref);
+ return false;
+ }
+
+ *phwe = hwe;
+ *pfw_ref = fw_ref;
+
+ return true;
+}
+
static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
{
unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };
@@ -278,8 +322,28 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
struct xe_hw_engine *hwe;
struct xe_exec_queue *q;
u64 gpu_timestamp;
+ unsigned int fw_ref;
+
+ /*
+ * RING_TIMESTAMP registers are inaccessible in VF mode.
+ * Without drm-total-cycles-*, other keys provide little value.
+ * Show all or none of the optional "run_ticks" keys in this case.
+ */
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ /*
+ * Wait for any exec queue going away: their cycles will get updated on
+ * context switch out, so wait for that to happen
+ */
+ wait_var_event(&xef->exec_queue.pending_removal,
+ !atomic_read(&xef->exec_queue.pending_removal));
xe_pm_runtime_get(xe);
+ if (!force_wake_get_any_engine(xe, &hwe, &fw_ref)) {
+ xe_pm_runtime_put(xe);
+ return;
+ }
/* Accumulate all the exec queues from this client */
mutex_lock(&xef->exec_queue.lock);
@@ -294,30 +358,11 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
}
mutex_unlock(&xef->exec_queue.lock);
- /* Get the total GPU cycles */
- for_each_gt(gt, xe, gt_id) {
- enum xe_force_wake_domains fw;
-
- hwe = xe_gt_any_hw_engine(gt);
- if (!hwe)
- continue;
-
- fw = xe_hw_engine_to_fw_domain(hwe);
- if (xe_force_wake_get(gt_to_fw(gt), fw)) {
- hwe = NULL;
- break;
- }
-
- gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw));
- break;
- }
+ gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
+ xe_force_wake_put(gt_to_fw(hwe->gt), fw_ref);
xe_pm_runtime_put(xe);
- if (unlikely(!hwe))
- return;
-
for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
const char *class_name;
@@ -348,7 +393,7 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
* @p: The drm_printer ptr
* @file: The drm_file ptr
*
- * This is callabck for drm fdinfo interface. Register this callback
+ * This is a callback for the drm fdinfo interface. Register this callback
* in drm driver ops for show_fdinfo.
*
* Return: void
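The run-ticks rework above feeds the per-client drm-cycles-<class> and drm-total-cycles-<class> fdinfo keys. A hedged userspace sketch of how a monitoring tool might turn two samples of those keys into a utilization figure; the key and class-name strings ("rcs") follow the standard DRM fdinfo conventions and are assumptions here, as is the one-second sampling interval:

    /* Sketch: derive render-class GPU utilization from two fdinfo samples. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int read_keys(const char *path, uint64_t *cycles, uint64_t *total)
    {
            char line[256];
            FILE *f = fopen(path, "r");

            if (!f)
                    return -1;
            *cycles = *total = 0;
            while (fgets(line, sizeof(line), f)) {
                    /* Keys assumed: drm-cycles-rcs / drm-total-cycles-rcs. */
                    sscanf(line, "drm-cycles-rcs: %" SCNu64, cycles);
                    sscanf(line, "drm-total-cycles-rcs: %" SCNu64, total);
            }
            fclose(f);
            return 0;
    }

    int main(int argc, char **argv)
    {
            uint64_t c0, t0, c1, t1;

            if (argc < 2 || read_keys(argv[1], &c0, &t0))	/* /proc/<pid>/fdinfo/<fd> */
                    return 1;
            sleep(1);
            if (read_keys(argv[1], &c1, &t1) || t1 == t0)
                    return 1;
            printf("rcs busy: %.1f%%\n", 100.0 * (double)(c1 - c0) / (double)(t1 - t0));
            return 0;
    }

A real tool would iterate all engine classes and aggregate the values across every DRM fd of the client.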
diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h
index d45b71426cc8..d61650d4aa0b 100644
--- a/drivers/gpu/drm/xe/xe_drv.h
+++ b/drivers/gpu/drm/xe/xe_drv.h
@@ -10,7 +10,6 @@
#define DRIVER_NAME "xe"
#define DRIVER_DESC "Intel Xe Graphics"
-#define DRIVER_DATE "20201103"
/* Interface history:
*
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
new file mode 100644
index 000000000000..96732613b4b7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -0,0 +1,964 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/types.h>
+
+#include <drm/drm_drv.h>
+#include <generated/xe_wa_oob.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_eu_stall.h"
+#include "xe_force_wake.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_topology.h"
+#include "xe_macros.h"
+#include "xe_observation.h"
+#include "xe_pm.h"
+#include "xe_trace.h"
+#include "xe_wa.h"
+
+#include "regs/xe_eu_stall_regs.h"
+#include "regs/xe_gt_regs.h"
+
+#define POLL_PERIOD_MS 5
+
+static size_t per_xecore_buf_size = SZ_512K;
+
+struct per_xecore_buf {
+ /* Buffer vaddr */
+ u8 *vaddr;
+ /* Write pointer */
+ u32 write;
+ /* Read pointer */
+ u32 read;
+};
+
+struct xe_eu_stall_data_stream {
+ bool pollin;
+ bool enabled;
+ int wait_num_reports;
+ int sampling_rate_mult;
+ wait_queue_head_t poll_wq;
+ size_t data_record_size;
+ size_t per_xecore_buf_size;
+
+ struct xe_gt *gt;
+ struct xe_bo *bo;
+ /* Lock to protect data buffer pointers */
+ struct mutex xecore_buf_lock;
+ struct per_xecore_buf *xecore_buf;
+ struct {
+ bool reported_to_user;
+ xe_dss_mask_t mask;
+ } data_drop;
+ struct delayed_work buf_poll_work;
+};
+
+struct xe_eu_stall_gt {
+ /* Lock to protect stream */
+ struct mutex stream_lock;
+ /* EU stall data stream */
+ struct xe_eu_stall_data_stream *stream;
+ /* Workqueue to schedule buffer pointers polling work */
+ struct workqueue_struct *buf_ptr_poll_wq;
+};
+
+/**
+ * struct eu_stall_open_properties - EU stall sampling properties received
+ * from user space at open.
+ * @sampling_rate_mult: EU stall sampling rate multiplier.
+ * HW will sample every (sampling_rate_mult x 251) cycles.
+ * @wait_num_reports: Minimum number of EU stall data reports to unblock poll().
+ * @gt: GT on which EU stall data will be captured.
+ */
+struct eu_stall_open_properties {
+ int sampling_rate_mult;
+ int wait_num_reports;
+ struct xe_gt *gt;
+};
+
+/*
+ * EU stall data format for PVC
+ */
+struct xe_eu_stall_data_pvc {
+ __u64 ip_addr:29; /* Bits 0 to 28 */
+ __u64 active_count:8; /* Bits 29 to 36 */
+ __u64 other_count:8; /* Bits 37 to 44 */
+ __u64 control_count:8; /* Bits 45 to 52 */
+ __u64 pipestall_count:8; /* Bits 53 to 60 */
+ __u64 send_count:8; /* Bits 61 to 68 */
+ __u64 dist_acc_count:8; /* Bits 69 to 76 */
+ __u64 sbid_count:8; /* Bits 77 to 84 */
+ __u64 sync_count:8; /* Bits 85 to 92 */
+ __u64 inst_fetch_count:8; /* Bits 93 to 100 */
+ __u64 unused_bits:27;
+ __u64 unused[6];
+} __packed;
+
+/*
+ * EU stall data format for Xe2 arch GPUs (LNL, BMG).
+ */
+struct xe_eu_stall_data_xe2 {
+ __u64 ip_addr:29; /* Bits 0 to 28 */
+ __u64 tdr_count:8; /* Bits 29 to 36 */
+ __u64 other_count:8; /* Bits 37 to 44 */
+ __u64 control_count:8; /* Bits 45 to 52 */
+ __u64 pipestall_count:8; /* Bits 53 to 60 */
+ __u64 send_count:8; /* Bits 61 to 68 */
+ __u64 dist_acc_count:8; /* Bits 69 to 76 */
+ __u64 sbid_count:8; /* Bits 77 to 84 */
+ __u64 sync_count:8; /* Bits 85 to 92 */
+ __u64 inst_fetch_count:8; /* Bits 93 to 100 */
+ __u64 active_count:8; /* Bits 101 to 108 */
+ __u64 ex_id:3; /* Bits 109 to 111 */
+ __u64 end_flag:1; /* Bit 112 */
+ __u64 unused_bits:15;
+ __u64 unused[6];
+} __packed;
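Both layouts above are 64 bytes: two 64-bit words of packed counters followed by six unused words, which is what xe_eu_stall_data_record_size() relies on further down. A hedged userspace sketch of walking Xe2-format records out of a read() buffer; the struct simply mirrors the layout above and, like the driver, assumes a little-endian ABI that packs these bitfields LSB-first:

    /* Sketch: decode Xe2-format records returned by read() on the stream fd. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct eu_stall_rec_xe2 {			/* mirrors xe_eu_stall_data_xe2 above */
            uint64_t ip_addr:29;
            uint64_t tdr_count:8;
            uint64_t other_count:8;
            uint64_t control_count:8;
            uint64_t pipestall_count:8;
            uint64_t send_count:8;
            uint64_t dist_acc_count:8;
            uint64_t sbid_count:8;
            uint64_t sync_count:8;
            uint64_t inst_fetch_count:8;
            uint64_t active_count:8;
            uint64_t ex_id:3;
            uint64_t end_flag:1;
            uint64_t unused_bits:15;
            uint64_t unused[6];
    } __attribute__((packed));

    _Static_assert(sizeof(struct eu_stall_rec_xe2) == 64, "one record per 64B row");

    static void dump_records(const void *buf, size_t nbytes)
    {
            const struct eu_stall_rec_xe2 *rec = buf;
            size_t i;

            for (i = 0; i < nbytes / sizeof(*rec); i++)
                    printf("ip=0x%llx active=%u send-stalled=%u\n",
                           (unsigned long long)rec[i].ip_addr,
                           (unsigned int)rec[i].active_count,
                           (unsigned int)rec[i].send_count);
    }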
+
+const u64 eu_stall_sampling_rates[] = {251, 251 * 2, 251 * 3, 251 * 4, 251 * 5, 251 * 6, 251 * 7};
+
+/**
+ * xe_eu_stall_get_sampling_rates - get EU stall sampling rates information.
+ *
+ * @num_rates: Pointer to a u32 to return the number of sampling rates.
+ * @rates: Double pointer to be set to the array of sampling rates.
+ *
+ * Stores the number of sampling rates and pointer to the array of
+ * sampling rates in the input pointers.
+ *
+ * Returns: Size of the EU stall sampling rates array.
+ */
+size_t xe_eu_stall_get_sampling_rates(u32 *num_rates, const u64 **rates)
+{
+ *num_rates = ARRAY_SIZE(eu_stall_sampling_rates);
+ *rates = eu_stall_sampling_rates;
+
+ return sizeof(eu_stall_sampling_rates);
+}
+
+/**
+ * xe_eu_stall_get_per_xecore_buf_size - get per XeCore buffer size.
+ *
+ * Returns: The per XeCore buffer size used to allocate the per GT
+ * EU stall data buffer.
+ */
+size_t xe_eu_stall_get_per_xecore_buf_size(void)
+{
+ return per_xecore_buf_size;
+}
+
+/**
+ * xe_eu_stall_data_record_size - get EU stall data record size.
+ *
+ * @xe: Pointer to a Xe device.
+ *
+ * Returns: EU stall data record size.
+ */
+size_t xe_eu_stall_data_record_size(struct xe_device *xe)
+{
+ size_t record_size = 0;
+
+ if (xe->info.platform == XE_PVC)
+ record_size = sizeof(struct xe_eu_stall_data_pvc);
+ else if (GRAPHICS_VER(xe) >= 20)
+ record_size = sizeof(struct xe_eu_stall_data_xe2);
+
+ xe_assert(xe, is_power_of_2(record_size));
+
+ return record_size;
+}
+
+/**
+ * num_data_rows - Return the number of EU stall data rows of 64B each
+ * for a given data size.
+ *
+ * @data_size: EU stall data size
+ */
+static u32 num_data_rows(u32 data_size)
+{
+ return data_size >> 6;
+}
+
+static void xe_eu_stall_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq);
+ mutex_destroy(&gt->eu_stall->stream_lock);
+ kfree(gt->eu_stall);
+}
+
+/**
+ * xe_eu_stall_init() - Allocate and initialize GT level EU stall data
+ * structure xe_eu_stall_gt within struct xe_gt.
+ *
+ * @gt: GT being initialized.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+int xe_eu_stall_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int ret;
+
+ if (!xe_eu_stall_supported_on_platform(xe))
+ return 0;
+
+ gt->eu_stall = kzalloc(sizeof(*gt->eu_stall), GFP_KERNEL);
+ if (!gt->eu_stall) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ mutex_init(&gt->eu_stall->stream_lock);
+
+ gt->eu_stall->buf_ptr_poll_wq = alloc_ordered_workqueue("xe_eu_stall", 0);
+ if (!gt->eu_stall->buf_ptr_poll_wq) {
+ ret = -ENOMEM;
+ goto exit_free;
+ }
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt);
+exit_free:
+ mutex_destroy(&gt->eu_stall->stream_lock);
+ kfree(gt->eu_stall);
+exit:
+ return ret;
+}
+
+static int set_prop_eu_stall_sampling_rate(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ value = div_u64(value, 251);
+ if (value == 0 || value > 7) {
+ drm_dbg(&xe->drm, "Invalid EU stall sampling rate %llu\n", value);
+ return -EINVAL;
+ }
+ props->sampling_rate_mult = value;
+ return 0;
+}
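The property value is taken in HW cycles and converted to the 1..7 multiplier by truncating integer division, so only exact multiples of 251 map back to the requested rate. A small illustrative helper (not part of the patch) showing the mapping this implements:

    /* Illustrative only: the mapping set_prop_eu_stall_sampling_rate() performs. */
    static int rate_to_mult(unsigned long long cycles)
    {
            unsigned long long mult = cycles / 251;	/* same as div_u64() above */

            if (mult == 0 || mult > 7)
                    return -1;		/* rejected with -EINVAL by the driver */
            return mult;			/* HW then samples every mult * 251 cycles */
    }

    /*
     * rate_to_mult(251)  == 1   samples every 251 cycles
     * rate_to_mult(1004) == 4   samples every 1004 cycles
     * rate_to_mult(1000) == 3   truncated: actually samples every 753 cycles
     * rate_to_mult(2008) == -1  8 * 251 exceeds the supported multiplier range
     */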
+
+static int set_prop_eu_stall_wait_num_reports(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ props->wait_num_reports = value;
+
+ return 0;
+}
+
+static int set_prop_eu_stall_gt_id(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props)
+{
+ if (value >= xe->info.gt_count) {
+ drm_dbg(&xe->drm, "Invalid GT ID %llu for EU stall sampling\n", value);
+ return -EINVAL;
+ }
+ props->gt = xe_device_get_gt(xe, value);
+ return 0;
+}
+
+typedef int (*set_eu_stall_property_fn)(struct xe_device *xe, u64 value,
+ struct eu_stall_open_properties *props);
+
+static const set_eu_stall_property_fn xe_set_eu_stall_property_funcs[] = {
+ [DRM_XE_EU_STALL_PROP_SAMPLE_RATE] = set_prop_eu_stall_sampling_rate,
+ [DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS] = set_prop_eu_stall_wait_num_reports,
+ [DRM_XE_EU_STALL_PROP_GT_ID] = set_prop_eu_stall_gt_id,
+};
+
+static int xe_eu_stall_user_ext_set_property(struct xe_device *xe, u64 extension,
+ struct eu_stall_open_properties *props)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.property >= ARRAY_SIZE(xe_set_eu_stall_property_funcs)) ||
+ XE_IOCTL_DBG(xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_set_eu_stall_property_funcs));
+ return xe_set_eu_stall_property_funcs[idx](xe, ext.value, props);
+}
+
+typedef int (*xe_eu_stall_user_extension_fn)(struct xe_device *xe, u64 extension,
+ struct eu_stall_open_properties *props);
+static const xe_eu_stall_user_extension_fn xe_eu_stall_user_extension_funcs[] = {
+ [DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY] = xe_eu_stall_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 5
+static int xe_eu_stall_user_extensions(struct xe_device *xe, u64 extension,
+ int ext_number, struct eu_stall_open_properties *props)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(xe, ext.pad) ||
+ XE_IOCTL_DBG(xe, ext.name >= ARRAY_SIZE(xe_eu_stall_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_eu_stall_user_extension_funcs));
+ err = xe_eu_stall_user_extension_funcs[idx](xe, extension, props);
+ if (XE_IOCTL_DBG(xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return xe_eu_stall_user_extensions(xe, ext.next_extension, ++ext_number, props);
+
+ return 0;
+}
+
+/**
+ * buf_data_size - Calculate the number of bytes in a circular buffer
+ * given the read and write pointers and the size of
+ * the buffer.
+ *
+ * @buf_size: Size of the circular buffer
+ * @read_ptr: Read pointer with an additional overflow bit
+ * @write_ptr: Write pointer with an additional overflow bit
+ *
+ * Since the read and write pointers have an additional overflow bit,
+ * this function calculates the offsets from the pointers and uses the
+ * offsets to calculate the data size in the buffer.
+ *
+ * Returns: number of bytes of data in the buffer
+ */
+static u32 buf_data_size(size_t buf_size, u32 read_ptr, u32 write_ptr)
+{
+ u32 read_offset, write_offset, size = 0;
+
+ if (read_ptr == write_ptr)
+ goto exit;
+
+ read_offset = read_ptr & (buf_size - 1);
+ write_offset = write_ptr & (buf_size - 1);
+
+ if (write_offset > read_offset)
+ size = write_offset - read_offset;
+ else
+ size = buf_size - read_offset + write_offset;
+exit:
+ return size;
+}
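Because both pointers carry one extra wrap bit, equal values always mean "empty" while a completely full buffer remains representable. A short sanity-check sketch with a 256 KiB buffer (18 offset bits plus the wrap bit); the helper name is illustrative:

    /* Sketch: worked values for buf_data_size() with a 256 KiB buffer. */
    static void buf_data_size_examples(void)
    {
            const size_t sz = SZ_256K;	/* offsets use 18 bits + 1 wrap bit */

            /* Identical pointers (offset and wrap bit): buffer is empty. */
            WARN_ON(buf_data_size(sz, 0x00080, 0x00080) != 0);

            /* Writer ahead of the reader within the same lap. */
            WARN_ON(buf_data_size(sz, 0x00080, 0x10080) != 0x10000);

            /* Equal offsets but different wrap bits: buffer is completely full. */
            WARN_ON(buf_data_size(sz, 0x00080, 0x40080) != sz);
    }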
+
+/**
+ * eu_stall_data_buf_poll - Poll for EU stall data in the buffer.
+ *
+ * @stream: xe EU stall data stream instance
+ *
+ * Returns: true if the EU stall buffer contains minimum stall data as
+ * specified by the event report count, else false.
+ */
+static bool eu_stall_data_buf_poll(struct xe_eu_stall_data_stream *stream)
+{
+ u32 read_ptr, write_ptr_reg, write_ptr, total_data = 0;
+ u32 buf_size = stream->per_xecore_buf_size;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ bool min_data_present = false;
+ u16 group, instance;
+ unsigned int xecore;
+
+ mutex_lock(&stream->xecore_buf_lock);
+ for_each_dss_steering(xecore, gt, group, instance) {
+ xecore_buf = &stream->xecore_buf[xecore];
+ read_ptr = xecore_buf->read;
+ write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT,
+ group, instance);
+ write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
+ write_ptr <<= 6;
+ write_ptr &= ((buf_size << 1) - 1);
+ if (!min_data_present) {
+ total_data += buf_data_size(buf_size, read_ptr, write_ptr);
+ if (num_data_rows(total_data) >= stream->wait_num_reports)
+ min_data_present = true;
+ }
+ if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+ set_bit(xecore, stream->data_drop.mask);
+ xecore_buf->write = write_ptr;
+ }
+ mutex_unlock(&stream->xecore_buf_lock);
+
+ return min_data_present;
+}
+
+static void clear_dropped_eviction_line_bit(struct xe_gt *gt, u16 group, u16 instance)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 write_ptr_reg;
+
+ /* On PVC, the overflow bit has to be cleared by writing 1 to it.
+ * On Xe2 and later GPUs, the bit has to be cleared by writing 0 to it.
+ */
+ if (GRAPHICS_VER(xe) >= 20)
+ write_ptr_reg = _MASKED_BIT_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ else
+ write_ptr_reg = _MASKED_BIT_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT, write_ptr_reg, group, instance);
+}
+
+static int xe_eu_stall_data_buf_read(struct xe_eu_stall_data_stream *stream,
+ char __user *buf, size_t count,
+ size_t *total_data_size, struct xe_gt *gt,
+ u16 group, u16 instance, unsigned int xecore)
+{
+ size_t read_data_size, copy_size, buf_size;
+ u32 read_ptr_reg, read_ptr, write_ptr;
+ u8 *xecore_start_vaddr, *read_vaddr;
+ struct per_xecore_buf *xecore_buf;
+ u32 read_offset, write_offset;
+
+ /* Hardware increments the read and write pointers such that they can
+ * overflow into one additional bit. For example, a 256KB size buffer
+ * offset pointer needs 18 bits. But HW uses 19 bits for the read and
+ * write pointers. This technique avoids wasting a slot in the buffer.
+ * Read and write offsets are calculated from the pointers in order to
+ * check if the write pointer has wrapped around the array.
+ */
+ xecore_buf = &stream->xecore_buf[xecore];
+ xecore_start_vaddr = xecore_buf->vaddr;
+ read_ptr = xecore_buf->read;
+ write_ptr = xecore_buf->write;
+ buf_size = stream->per_xecore_buf_size;
+
+ read_data_size = buf_data_size(buf_size, read_ptr, write_ptr);
+ /* Read only the data that the user space buffer can accommodate */
+ read_data_size = min_t(size_t, count - *total_data_size, read_data_size);
+ if (read_data_size == 0)
+ goto exit_drop;
+
+ read_offset = read_ptr & (buf_size - 1);
+ write_offset = write_ptr & (buf_size - 1);
+ read_vaddr = xecore_start_vaddr + read_offset;
+
+ if (write_offset > read_offset) {
+ if (copy_to_user(buf + *total_data_size, read_vaddr, read_data_size))
+ return -EFAULT;
+ } else {
+ if (read_data_size >= buf_size - read_offset)
+ copy_size = buf_size - read_offset;
+ else
+ copy_size = read_data_size;
+ if (copy_to_user(buf + *total_data_size, read_vaddr, copy_size))
+ return -EFAULT;
+ if (copy_to_user(buf + *total_data_size + copy_size,
+ xecore_start_vaddr, read_data_size - copy_size))
+ return -EFAULT;
+ }
+
+ *total_data_size += read_data_size;
+ read_ptr += read_data_size;
+
+ /* Read pointer can overflow into one additional bit */
+ read_ptr &= (buf_size << 1) - 1;
+ read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, (read_ptr >> 6));
+ read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
+ xecore_buf->read = read_ptr;
+ trace_xe_eu_stall_data_read(group, instance, read_ptr, write_ptr,
+ read_data_size, *total_data_size);
+exit_drop:
+ /* Clear drop bit (if set) after any data was read or if the buffer was empty.
+ * Drop bit can be set even if the buffer is empty as the buffer may have been emptied
+ * in the previous read() and the data drop bit was set during the previous read().
+ */
+ if (test_bit(xecore, stream->data_drop.mask)) {
+ clear_dropped_eviction_line_bit(gt, group, instance);
+ clear_bit(xecore, stream->data_drop.mask);
+ }
+ return 0;
+}
+
+/**
+ * xe_eu_stall_stream_read_locked - copy EU stall counters data from the
+ * per xecore buffers to the userspace buffer
+ * @stream: A stream opened for EU stall count metrics
+ * @file: An xe EU stall data stream file
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ *
+ * Returns: Number of bytes copied or a negative error code.
+ * If we've successfully copied any data, reporting that takes
+ * precedence over any internal error status, so the data isn't lost.
+ */
+static ssize_t xe_eu_stall_stream_read_locked(struct xe_eu_stall_data_stream *stream,
+ struct file *file, char __user *buf,
+ size_t count)
+{
+ struct xe_gt *gt = stream->gt;
+ size_t total_size = 0;
+ u16 group, instance;
+ unsigned int xecore;
+ int ret = 0;
+
+ mutex_lock(&stream->xecore_buf_lock);
+ if (bitmap_weight(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS)) {
+ if (!stream->data_drop.reported_to_user) {
+ stream->data_drop.reported_to_user = true;
+ xe_gt_dbg(gt, "EU stall data dropped in XeCores: %*pb\n",
+ XE_MAX_DSS_FUSE_BITS, stream->data_drop.mask);
+ mutex_unlock(&stream->xecore_buf_lock);
+ return -EIO;
+ }
+ stream->data_drop.reported_to_user = false;
+ }
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ ret = xe_eu_stall_data_buf_read(stream, buf, count, &total_size,
+ gt, group, instance, xecore);
+ if (ret || count == total_size)
+ break;
+ }
+ mutex_unlock(&stream->xecore_buf_lock);
+ return total_size ?: (ret ?: -EAGAIN);
+}
+
+/*
+ * Userspace must enable the EU stall stream with DRM_XE_OBSERVATION_IOCTL_ENABLE
+ * before calling read().
+ *
+ * Returns: The number of bytes copied or a negative error code on failure.
+ * -EIO if HW drops any EU stall data when the buffer is full.
+ */
+static ssize_t xe_eu_stall_stream_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ ssize_t ret, aligned_count;
+
+ aligned_count = ALIGN_DOWN(count, stream->data_record_size);
+ if (aligned_count == 0)
+ return -EINVAL;
+
+ if (!stream->enabled) {
+ xe_gt_dbg(gt, "EU stall data stream not enabled to read\n");
+ return -EINVAL;
+ }
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ do {
+ ret = wait_event_interruptible(stream->poll_wq, stream->pollin);
+ if (ret)
+ return -EINTR;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_read_locked(stream, file, buf, aligned_count);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+ } while (ret == -EAGAIN);
+ } else {
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_read_locked(stream, file, buf, aligned_count);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+ }
+
+ /*
+ * This may not work correctly if the user buffer is very small.
+ * We don't want to block the next read() when there is data in the buffer
+ * now that couldn't be accommodated in the small user buffer.
+ */
+ stream->pollin = false;
+
+ return ret;
+}
+
+static void xe_eu_stall_stream_free(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+
+ mutex_destroy(&stream->xecore_buf_lock);
+ gt->eu_stall->stream = NULL;
+ kfree(stream);
+}
+
+static void xe_eu_stall_data_buf_destroy(struct xe_eu_stall_data_stream *stream)
+{
+ xe_bo_unpin_map_no_vm(stream->bo);
+ kfree(stream->xecore_buf);
+}
+
+static int xe_eu_stall_data_buf_alloc(struct xe_eu_stall_data_stream *stream,
+ u16 last_xecore)
+{
+ struct xe_tile *tile = stream->gt->tile;
+ struct xe_bo *bo;
+ u32 size;
+
+ stream->xecore_buf = kcalloc(last_xecore, sizeof(*stream->xecore_buf), GFP_KERNEL);
+ if (!stream->xecore_buf)
+ return -ENOMEM;
+
+ size = stream->per_xecore_buf_size * last_xecore;
+
+ bo = xe_bo_create_pin_map_at_aligned(tile->xe, tile, NULL,
+ size, ~0ull, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64);
+ if (IS_ERR(bo)) {
+ kfree(stream->xecore_buf);
+ return PTR_ERR(bo);
+ }
+
+ XE_WARN_ON(!IS_ALIGNED(xe_bo_ggtt_addr(bo), SZ_64));
+ stream->bo = bo;
+
+ return 0;
+}
+
+static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
+{
+ u32 write_ptr_reg, write_ptr, read_ptr_reg, reg_value;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ u16 group, instance;
+ unsigned int fw_ref;
+ int xecore;
+
+ /* Take runtime pm ref and forcewake to disable RC6 */
+ xe_pm_runtime_get(gt_to_xe(gt));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER)) {
+ xe_gt_err(gt, "Failed to get RENDER forcewake\n");
+ xe_pm_runtime_put(gt_to_xe(gt));
+ return -ETIMEDOUT;
+ }
+
+ if (XE_WA(gt, 22016596838))
+ xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
+ _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT, group, instance);
+ /* Clear any drop bits set and not cleared in the previous session. */
+ if (write_ptr_reg & XEHPC_EUSTALL_REPORT_OVERFLOW_DROP)
+ clear_dropped_eviction_line_bit(gt, group, instance);
+ write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
+ read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, write_ptr);
+ read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ /* Initialize the read pointer to the write pointer */
+ xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
+ write_ptr <<= 6;
+ write_ptr &= (stream->per_xecore_buf_size << 1) - 1;
+ xecore_buf = &stream->xecore_buf[xecore];
+ xecore_buf->write = write_ptr;
+ xecore_buf->read = write_ptr;
+ }
+ stream->data_drop.reported_to_user = false;
+ bitmap_zero(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS);
+
+ reg_value = _MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
+ REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
+ REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
+ stream->sampling_rate_mult));
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_CTRL, reg_value);
+ /* GGTT addresses can never be > 32 bits */
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE_UPPER, 0);
+ reg_value = xe_bo_ggtt_addr(stream->bo);
+ reg_value |= REG_FIELD_PREP(XEHPC_EUSTALL_BASE_XECORE_BUF_SZ,
+ stream->per_xecore_buf_size / SZ_256K);
+ reg_value |= XEHPC_EUSTALL_BASE_ENABLE_SAMPLING;
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE, reg_value);
+
+ return 0;
+}
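Two unit conversions above are easy to miss: the write pointer read back from XEHPC_EUSTALL_REPORT counts 64-byte cache lines (hence the shift by 6 and the mask that keeps the extra wrap bit), and XEHPC_EUSTALL_BASE encodes the per-XeCore buffer size in 256 KiB units. A tiny illustrative check (not part of the patch), using the default 512 KiB buffer:

    /* Sketch: unit conversions used by xe_eu_stall_stream_enable(). */
    static void eu_stall_enable_units(void)
    {
            /* 0x1000 cache lines -> byte offset 256 KiB, wrap bit preserved by the mask. */
            BUILD_BUG_ON(((0x1000 << 6) & (SZ_512K * 2 - 1)) != SZ_256K);

            /* Size field of XEHPC_EUSTALL_BASE: 512 KiB buffer -> 2. */
            BUILD_BUG_ON(SZ_512K / SZ_256K != 2);
    }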
+
+static void eu_stall_data_buf_poll_work_fn(struct work_struct *work)
+{
+ struct xe_eu_stall_data_stream *stream =
+ container_of(work, typeof(*stream), buf_poll_work.work);
+ struct xe_gt *gt = stream->gt;
+
+ if (eu_stall_data_buf_poll(stream)) {
+ stream->pollin = true;
+ wake_up(&stream->poll_wq);
+ }
+ queue_delayed_work(gt->eu_stall->buf_ptr_poll_wq,
+ &stream->buf_poll_work,
+ msecs_to_jiffies(POLL_PERIOD_MS));
+}
+
+static int xe_eu_stall_stream_init(struct xe_eu_stall_data_stream *stream,
+ struct eu_stall_open_properties *props)
+{
+ unsigned int max_wait_num_reports, xecore, last_xecore, num_xecores;
+ struct per_xecore_buf *xecore_buf;
+ struct xe_gt *gt = stream->gt;
+ xe_dss_mask_t all_xecores;
+ u16 group, instance;
+ u32 vaddr_offset;
+ int ret;
+
+ bitmap_or(all_xecores, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
+ XE_MAX_DSS_FUSE_BITS);
+ num_xecores = bitmap_weight(all_xecores, XE_MAX_DSS_FUSE_BITS);
+ last_xecore = xe_gt_topology_mask_last_dss(all_xecores) + 1;
+
+ max_wait_num_reports = num_data_rows(per_xecore_buf_size * num_xecores);
+ if (props->wait_num_reports == 0 || props->wait_num_reports > max_wait_num_reports) {
+ xe_gt_dbg(gt, "Invalid EU stall event report count %u\n",
+ props->wait_num_reports);
+ xe_gt_dbg(gt, "Minimum event report count is 1, maximum is %u\n",
+ max_wait_num_reports);
+ return -EINVAL;
+ }
+
+ init_waitqueue_head(&stream->poll_wq);
+ mutex_init(&stream->xecore_buf_lock);
+ INIT_DELAYED_WORK(&stream->buf_poll_work, eu_stall_data_buf_poll_work_fn);
+ stream->per_xecore_buf_size = per_xecore_buf_size;
+ stream->sampling_rate_mult = props->sampling_rate_mult;
+ stream->wait_num_reports = props->wait_num_reports;
+ stream->data_record_size = xe_eu_stall_data_record_size(gt_to_xe(gt));
+
+ ret = xe_eu_stall_data_buf_alloc(stream, last_xecore);
+ if (ret)
+ return ret;
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ xecore_buf = &stream->xecore_buf[xecore];
+ vaddr_offset = xecore * stream->per_xecore_buf_size;
+ xecore_buf->vaddr = stream->bo->vmap.vaddr + vaddr_offset;
+ }
+ return 0;
+}
+
+static __poll_t xe_eu_stall_stream_poll_locked(struct xe_eu_stall_data_stream *stream,
+ struct file *file, poll_table *wait)
+{
+ __poll_t events = 0;
+
+ poll_wait(file, &stream->poll_wq, wait);
+
+ if (stream->pollin)
+ events |= EPOLLIN;
+
+ return events;
+}
+
+static __poll_t xe_eu_stall_stream_poll(struct file *file, poll_table *wait)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ __poll_t ret;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_poll_locked(stream, file, wait);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return ret;
+}
+
+static int xe_eu_stall_enable_locked(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+ int ret = 0;
+
+ if (stream->enabled)
+ return ret;
+
+ stream->enabled = true;
+
+ ret = xe_eu_stall_stream_enable(stream);
+
+ queue_delayed_work(gt->eu_stall->buf_ptr_poll_wq,
+ &stream->buf_poll_work,
+ msecs_to_jiffies(POLL_PERIOD_MS));
+ return ret;
+}
+
+static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
+{
+ struct xe_gt *gt = stream->gt;
+
+ if (!stream->enabled)
+ return 0;
+
+ stream->enabled = false;
+
+ xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE, 0);
+
+ cancel_delayed_work_sync(&stream->buf_poll_work);
+
+ if (XE_WA(gt, 22016596838))
+ xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
+ _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_RENDER);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
+static long xe_eu_stall_stream_ioctl_locked(struct xe_eu_stall_data_stream *stream,
+ unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case DRM_XE_OBSERVATION_IOCTL_ENABLE:
+ return xe_eu_stall_enable_locked(stream);
+ case DRM_XE_OBSERVATION_IOCTL_DISABLE:
+ return xe_eu_stall_disable_locked(stream);
+ }
+
+ return -EINVAL;
+}
+
+static long xe_eu_stall_stream_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+ long ret;
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_ioctl_locked(stream, cmd, arg);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return ret;
+}
+
+static int xe_eu_stall_stream_close(struct inode *inode, struct file *file)
+{
+ struct xe_eu_stall_data_stream *stream = file->private_data;
+ struct xe_gt *gt = stream->gt;
+
+ drm_dev_put(&gt->tile->xe->drm);
+
+ mutex_lock(&gt->eu_stall->stream_lock);
+ xe_eu_stall_disable_locked(stream);
+ xe_eu_stall_data_buf_destroy(stream);
+ xe_eu_stall_stream_free(stream);
+ mutex_unlock(&gt->eu_stall->stream_lock);
+
+ return 0;
+}
+
+static const struct file_operations fops_eu_stall = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .release = xe_eu_stall_stream_close,
+ .poll = xe_eu_stall_stream_poll,
+ .read = xe_eu_stall_stream_read,
+ .unlocked_ioctl = xe_eu_stall_stream_ioctl,
+ .compat_ioctl = xe_eu_stall_stream_ioctl,
+};
+
+static int xe_eu_stall_stream_open_locked(struct drm_device *dev,
+ struct eu_stall_open_properties *props,
+ struct drm_file *file)
+{
+ struct xe_eu_stall_data_stream *stream;
+ struct xe_gt *gt = props->gt;
+ unsigned long f_flags = 0;
+ int ret, stream_fd;
+
+ /* Only one session can be active at any time */
+ if (gt->eu_stall->stream) {
+ xe_gt_dbg(gt, "EU stall sampling session already active\n");
+ return -EBUSY;
+ }
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ gt->eu_stall->stream = stream;
+ stream->gt = gt;
+
+ ret = xe_eu_stall_stream_init(stream, props);
+ if (ret) {
+ xe_gt_dbg(gt, "EU stall stream init failed : %d\n", ret);
+ goto err_free;
+ }
+
+ stream_fd = anon_inode_getfd("[xe_eu_stall]", &fops_eu_stall, stream, f_flags);
+ if (stream_fd < 0) {
+ ret = stream_fd;
+ xe_gt_dbg(gt, "EU stall inode get fd failed : %d\n", ret);
+ goto err_destroy;
+ }
+
+ /* Take a reference on the driver that will be kept with stream_fd
+ * until its release.
+ */
+ drm_dev_get(&gt->tile->xe->drm);
+
+ return stream_fd;
+
+err_destroy:
+ xe_eu_stall_data_buf_destroy(stream);
+err_free:
+ xe_eu_stall_stream_free(stream);
+ return ret;
+}
+
+/**
+ * xe_eu_stall_stream_open - Open a xe EU stall data stream fd
+ *
+ * @dev: DRM device pointer
+ * @data: pointer to first struct @drm_xe_ext_set_property in
+ * the chain of input properties from the user space.
+ * @file: DRM file pointer
+ *
+ * This function opens an EU stall data stream with input properties from
+ * the user space.
+ *
+ * Returns: EU stall data stream fd on success or a negative error code.
+ */
+int xe_eu_stall_stream_open(struct drm_device *dev, u64 data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct eu_stall_open_properties props = {};
+ int ret;
+
+ if (!xe_eu_stall_supported_on_platform(xe)) {
+ drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ return -ENODEV;
+ }
+
+ if (xe_observation_paranoid && !perfmon_capable()) {
+ drm_dbg(&xe->drm, "Insufficient privileges for EU stall monitoring\n");
+ return -EACCES;
+ }
+
+ /* Initialize and set default values */
+ props.wait_num_reports = 1;
+ props.sampling_rate_mult = 4;
+
+ ret = xe_eu_stall_user_extensions(xe, data, 0, &props);
+ if (ret)
+ return ret;
+
+ if (!props.gt) {
+ drm_dbg(&xe->drm, "GT ID not provided for EU stall sampling\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&props.gt->eu_stall->stream_lock);
+ ret = xe_eu_stall_stream_open_locked(dev, &props, file);
+ mutex_unlock(&props.gt->eu_stall->stream_lock);
+
+ return ret;
+}
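Putting the pieces together, a heavily hedged userspace sketch of opening a stream: the three properties, DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY and DRM_XE_OBSERVATION_IOCTL_ENABLE come from the code above, while the observation-ioctl plumbing that routes here (struct drm_xe_observation_param and its field/constant names) is an assumption based on the existing xe observation uAPI and may differ:

    /* Sketch only: error handling trimmed; see the note above on assumed names. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>		/* or libdrm's copy of xe_drm.h */

    static int open_eu_stall_stream(int drm_fd, uint32_t gt_id)
    {
            struct drm_xe_ext_set_property exts[3] = {};
            struct drm_xe_observation_param param = {};	/* assumed layout */
            uint32_t props[3] = {
                    DRM_XE_EU_STALL_PROP_GT_ID,
                    DRM_XE_EU_STALL_PROP_SAMPLE_RATE,
                    DRM_XE_EU_STALL_PROP_WAIT_NUM_REPORTS,
            };
            uint64_t vals[3] = { gt_id, 251 * 4, 1 };	/* the driver's defaults */
            int i, stream_fd;

            for (i = 0; i < 3; i++) {
                    exts[i].base.name = DRM_XE_EU_STALL_EXTENSION_SET_PROPERTY;
                    exts[i].base.next_extension = i < 2 ? (uintptr_t)&exts[i + 1] : 0;
                    exts[i].property = props[i];
                    exts[i].value = vals[i];
            }

            param.observation_type = DRM_XE_OBSERVATION_TYPE_EU_STALL;	/* assumed name */
            param.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN;	/* assumed name */
            param.param = (uintptr_t)&exts[0];

            stream_fd = ioctl(drm_fd, DRM_IOCTL_XE_OBSERVATION, &param);
            if (stream_fd >= 0)
                    ioctl(stream_fd, DRM_XE_OBSERVATION_IOCTL_ENABLE, 0);
            return stream_fd;	/* read() then returns whole 64-byte records */
    }

poll() on the returned fd blocks until at least wait_num_reports records are available, per the poll path above.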
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.h b/drivers/gpu/drm/xe/xe_eu_stall.h
new file mode 100644
index 000000000000..d1c76e503799
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_eu_stall.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __XE_EU_STALL_H__
+#define __XE_EU_STALL_H__
+
+#include "xe_gt_types.h"
+#include "xe_sriov.h"
+
+size_t xe_eu_stall_get_per_xecore_buf_size(void);
+size_t xe_eu_stall_data_record_size(struct xe_device *xe);
+size_t xe_eu_stall_get_sampling_rates(u32 *num_rates, const u64 **rates);
+
+int xe_eu_stall_init(struct xe_gt *gt);
+int xe_eu_stall_stream_open(struct drm_device *dev,
+ u64 data,
+ struct drm_file *file);
+
+static inline bool xe_eu_stall_supported_on_platform(struct xe_device *xe)
+{
+ return !IS_SRIOV_VF(xe) && (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20);
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index f23ac1e2ed88..44364c042ad7 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -33,7 +33,7 @@
*
* In XE we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, have binds as
- * seperate operations, and using the DRM scheduler to flow control the ring.
+ * separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
*
* We can get away from a BO list by forcing the user to use in / out fences on
@@ -132,12 +132,16 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
- if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
- return -EINVAL;
+ if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
+ err = -EINVAL;
+ goto err_exec_queue;
+ }
if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
- q->width != args->num_batch_buffer))
- return -EINVAL;
+ q->width != args->num_batch_buffer)) {
+ err = -EINVAL;
+ goto err_exec_queue;
+ }
if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
err = -ECANCELED;
@@ -172,8 +176,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
if (xe_exec_queue_is_parallel(q)) {
- err = __copy_from_user(addresses, addresses_user, sizeof(u64) *
- q->width);
+ err = copy_from_user(addresses, addresses_user, sizeof(u64) *
+ q->width);
if (err) {
err = -EFAULT;
goto err_syncs;
@@ -199,14 +203,14 @@ retry:
write_locked = false;
}
if (err)
- goto err_syncs;
+ goto err_hw_exec_mode;
if (write_locked) {
err = xe_vm_userptr_pin(vm);
downgrade_write(&vm->lock);
write_locked = false;
if (err)
- goto err_hw_exec_mode;
+ goto err_unlock_list;
}
if (!args->num_batch_buffer) {
@@ -220,6 +224,7 @@ retry:
fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
+ xe_vm_unlock(vm);
goto err_unlock_list;
}
for (i = 0; i < num_syncs; i++)
@@ -257,6 +262,12 @@ retry:
goto err_exec;
}
+ if (xe_exec_queue_uses_pxp(q)) {
+ err = xe_vm_validate_protected(q->vm);
+ if (err)
+ goto err_exec;
+ }
+
job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
addresses : &args->address);
if (IS_ERR(job)) {
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index d098d2dd1b2d..fee22358cc09 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -8,6 +8,7 @@
#include <linux/nospec.h>
#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>
@@ -16,6 +17,7 @@
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
@@ -23,6 +25,7 @@
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
+#include "xe_pxp.h"
enum xe_exec_queue_sched_prop {
XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
@@ -36,6 +39,8 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
+ if (xe_exec_queue_uses_pxp(q))
+ xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
if (q->vm)
xe_vm_put(q->vm);
@@ -68,6 +73,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
q->gt = gt;
q->class = hwe->class;
q->width = width;
+ q->msix_vec = XE_IRQ_DEFAULT_MSIX;
q->logical_mask = logical_mask;
q->fence_irq = &gt->fence_irq[hwe->class];
q->ring_ops = gt->ring_ops[hwe->class];
@@ -75,6 +81,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
INIT_LIST_HEAD(&q->lr.link);
INIT_LIST_HEAD(&q->multi_gt_link);
INIT_LIST_HEAD(&q->hw_engine_group_link);
+ INIT_LIST_HEAD(&q->pxp.link);
q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
q->sched_props.preempt_timeout_us =
@@ -107,35 +114,37 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
- struct xe_vm *vm = q->vm;
int i, err;
+ u32 flags = 0;
- if (vm) {
- err = xe_vm_lock(vm, true);
- if (err)
- return err;
+ /*
+ * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
+ * other workload can use the EUs at the same time). On MTL this is done
+ * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
+ * is a dedicated bit for it.
+ */
+ if (xe_exec_queue_uses_pxp(q) &&
+ (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
+ if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
+ flags |= XE_LRC_CREATE_PXP;
+ else
+ flags |= XE_LRC_CREATE_RUNALONE;
}
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
+ q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
if (IS_ERR(q->lrc[i])) {
err = PTR_ERR(q->lrc[i]);
- goto err_unlock;
+ goto err_lrc;
}
}
- if (vm)
- xe_vm_unlock(vm);
-
err = q->ops->init(q);
if (err)
goto err_lrc;
return 0;
-err_unlock:
- if (vm)
- xe_vm_unlock(vm);
err_lrc:
for (i = i - 1; i >= 0; --i)
xe_lrc_put(q->lrc[i]);
@@ -150,6 +159,9 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
struct xe_exec_queue *q;
int err;
+ /* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
+ xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));
+
q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
extensions);
if (IS_ERR(q))
@@ -159,12 +171,26 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (err)
goto err_post_alloc;
+ /*
+ * We can only add the queue to the PXP list after the init is complete,
+ * because the PXP termination can call exec_queue_kill and that will
+ * go bad if the queue is only half-initialized. This means that we
+ * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
+ * and we need to do it here instead.
+ */
+ if (xe_exec_queue_uses_pxp(q)) {
+ err = xe_pxp_exec_queue_add(xe->pxp, q);
+ if (err)
+ goto err_post_alloc;
+ }
+
return q;
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
struct xe_vm *vm,
@@ -240,12 +266,16 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
return q;
}
+ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
void xe_exec_queue_destroy(struct kref *ref)
{
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
struct xe_exec_queue *eq, *next;
+ if (xe_exec_queue_uses_pxp(q))
+ xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
+
xe_exec_queue_last_fence_put_unlocked(q);
if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
list_for_each_entry_safe(eq, next, &q->multi_gt_list,
@@ -260,8 +290,17 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
{
int i;
+ /*
+ * Before releasing our ref to lrc and xef, accumulate our run ticks
+ * and wakeup any waiters.
+ */
+ xe_exec_queue_update_run_ticks(q);
+ if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
+ wake_up_var(&q->xef->exec_queue.pending_removal);
+
for (i = 0; i < q->width; ++i)
xe_lrc_put(q->lrc[i]);
+
__xe_exec_queue_free(q);
}
@@ -392,6 +431,22 @@ static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *
return 0;
}
+static int
+exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
+{
+ if (value == DRM_XE_PXP_TYPE_NONE)
+ return 0;
+
+ /* we only support HWDRM sessions right now */
+ if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
+ return -EINVAL;
+
+ if (!xe_pxp_is_enabled(xe->pxp))
+ return -ENODEV;
+
+ return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
+}
+
typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
struct xe_exec_queue *q,
u64 value);
@@ -399,6 +454,7 @@ typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
+ [DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
};
static int exec_queue_user_ext_set_property(struct xe_device *xe,
@@ -410,7 +466,7 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
int err;
u32 idx;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -418,7 +474,8 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
ARRAY_SIZE(exec_queue_set_property_funcs)) ||
XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
- ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE))
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
+ ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
return -EINVAL;
idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
@@ -448,7 +505,7 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
- err = __copy_from_user(&ext, address, sizeof(ext));
+ err = copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
@@ -470,7 +527,7 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
return 0;
}
-static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
+static u32 calc_validate_logical_mask(struct xe_device *xe,
struct drm_xe_engine_class_instance *eci,
u16 width, u16 num_placements)
{
@@ -532,15 +589,15 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
u64_to_user_ptr(args->instances);
struct xe_hw_engine *hwe;
struct xe_vm *vm;
- struct xe_gt *gt;
struct xe_tile *tile;
struct xe_exec_queue *q = NULL;
u32 logical_mask;
+ u32 flags = 0;
u32 id;
u32 len;
int err;
- if (XE_IOCTL_DBG(xe, args->flags) ||
+ if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
return -EINVAL;
@@ -548,15 +605,17 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
return -EINVAL;
- err = __copy_from_user(eci, user_eci,
- sizeof(struct drm_xe_engine_class_instance) *
- len);
+ err = copy_from_user(eci, user_eci,
+ sizeof(struct drm_xe_engine_class_instance) * len);
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
return -EINVAL;
+ if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
+ flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;
+
if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
if (XE_IOCTL_DBG(xe, args->width != 1) ||
XE_IOCTL_DBG(xe, args->num_placements != 1) ||
@@ -565,8 +624,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
for_each_tile(tile, xe, id) {
struct xe_exec_queue *new;
- u32 flags = EXEC_QUEUE_FLAG_VM;
+ flags |= EXEC_QUEUE_FLAG_VM;
if (id)
flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;
@@ -585,8 +644,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
&q->multi_gt_link);
}
} else {
- gt = xe_device_get_gt(xe, eci[0].gt_id);
- logical_mask = calc_validate_logical_mask(xe, gt, eci,
+ logical_mask = calc_validate_logical_mask(xe, eci,
args->width,
args->num_placements);
if (XE_IOCTL_DBG(xe, !logical_mask))
@@ -613,7 +671,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
}
q = xe_exec_queue_create(xe, vm, logical_mask,
- args->width, hwe, 0,
+ args->width, hwe, flags,
args->extensions);
up_read(&vm->lock);
xe_vm_put(vm);
@@ -756,20 +814,21 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
*/
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
- struct xe_file *xef;
+ struct xe_device *xe = gt_to_xe(q->gt);
struct xe_lrc *lrc;
- u32 old_ts, new_ts;
+ u64 old_ts, new_ts;
+ int idx;
/*
- * Jobs that are run during driver load may use an exec_queue, but are
- * not associated with a user xe file, so avoid accumulating busyness
- * for kernel specific work.
+ * Jobs that are executed by the kernel don't have a corresponding xe_file
+ * and thus are not accounted.
*/
- if (!q->vm || !q->vm->xef)
+ if (!q->xef)
return;
- xef = q->vm->xef;
-
+ /* Synchronize with unbind while holding the xe file open */
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return;
/*
* Only sample the first LRC. For parallel submission, all of them are
* scheduled together and we compensate that below by multiplying by
@@ -780,7 +839,9 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
*/
lrc = q->lrc[0];
new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
- xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+ q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
+
+ drm_dev_exit(idx);
}
/**
@@ -820,7 +881,10 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
mutex_lock(&xef->exec_queue.lock);
q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
+ if (q)
+ atomic_inc(&xef->exec_queue.pending_removal);
mutex_unlock(&xef->exec_queue.lock);
+
if (XE_IOCTL_DBG(xe, !q))
return -ENOENT;
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 90c7f73eab88..17bc50a7f05a 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -57,6 +57,11 @@ static inline bool xe_exec_queue_is_parallel(struct xe_exec_queue *q)
return q->width > 1;
}
+static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
+{
+ return q->pxp.type;
+}
+
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 7deb480e26af..cc1cffb5c87f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -41,7 +41,7 @@ struct xe_exec_queue {
/** @xef: Back pointer to xe file if this is user created exec queue */
struct xe_file *xef;
- /** @gt: graphics tile this exec queue can submit to */
+ /** @gt: GT structure this exec queue can submit to */
struct xe_gt *gt;
/**
* @hwe: A hardware of the same class. May (physical engine) or may not
@@ -63,6 +63,8 @@ struct xe_exec_queue {
char name[MAX_FENCE_NAME_LEN];
/** @width: width (number BB submitted per exec) of this exec queue */
u16 width;
+ /** @msix_vec: MSI-X vector (for platforms that support it) */
+ u16 msix_vec;
/** @fence_irq: fence IRQ used to signal job completion */
struct xe_hw_fence_irq *fence_irq;
@@ -83,6 +85,8 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD BIT(3)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
+/* flag to indicate low latency hint to guc */
+#define EXEC_QUEUE_FLAG_LOW_LATENCY BIT(5)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -128,6 +132,14 @@ struct xe_exec_queue {
struct list_head link;
} lr;
+ /** @pxp: PXP info tracking */
+ struct {
+ /** @pxp.type: PXP session type used by this queue */
+ u8 type;
+ /** @pxp.link: link into the list of PXP exec queues */
+ struct list_head link;
+ } pxp;
+
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
@@ -143,7 +155,7 @@ struct xe_exec_queue {
/** @hw_engine_group_link: link into exec queues in the same hw engine group */
struct list_head hw_engine_group_link;
/** @lrc: logical ring context for this exec queue */
- struct xe_lrc *lrc[];
+ struct xe_lrc *lrc[] __counted_by(width);
};
/**
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 6a59165b9569..788f56b066b6 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -17,6 +17,7 @@
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
+#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
@@ -44,8 +45,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
u32 ctx_id)
{
struct xe_gt *gt = hwe->gt;
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
@@ -58,7 +61,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
}
if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
- xe_mmio_write32(hwe->gt, RCU_MODE,
+ xe_mmio_write32(mmio, RCU_MODE,
_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
@@ -76,17 +79,19 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
*/
wmb();
- xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base),
xe_bo_ggtt_addr(hwe->hwsp));
- xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(gt, RING_MODE(hwe->mmio_base),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+ xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
- xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
+
+ xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
lower_32_bits(lrc_desc));
- xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
upper_32_bits(lrc_desc));
- xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base),
+ xe_mmio_write32(mmio, RING_EXECLIST_CONTROL(hwe->mmio_base),
EL_CTRL_LOAD);
}
@@ -168,8 +173,8 @@ static u64 read_execlist_status(struct xe_hw_engine *hwe)
struct xe_gt *gt = hwe->gt;
u32 hi, lo;
- lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
- hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
+ lo = xe_mmio_read32(&gt->mmio, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
+ hi = xe_mmio_read32(&gt->mmio, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
return lo | (u64)hi << 32;
}
@@ -264,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
port->hwe = hwe;
- port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX, 0);
if (IS_ERR(port->lrc)) {
err = PTR_ERR(port->lrc);
goto err;
@@ -292,7 +297,7 @@ err:
void xe_execlist_port_destroy(struct xe_execlist_port *port)
{
- del_timer(&port->irq_fail);
+ timer_delete(&port->irq_fail);
/* Prevent an interrupt while we're destroying */
spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
@@ -312,7 +317,7 @@ execlist_run_job(struct drm_sched_job *drm_job)
q->ring_ops->emit_job(job);
xe_execlist_make_active(exl);
- return dma_fence_get(job->fence);
+ return job->fence;
}
static void execlist_job_free(struct drm_sched_job *drm_job)
@@ -331,6 +336,15 @@ static const struct drm_sched_backend_ops drm_sched_ops = {
static int execlist_exec_queue_init(struct xe_exec_queue *q)
{
struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args args = {
+ .ops = &drm_sched_ops,
+ .num_rqs = 1,
+ .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
+ .hang_limit = XE_SCHED_HANG_LIMIT,
+ .timeout = XE_SCHED_JOB_TIMEOUT,
+ .name = q->hwe->name,
+ .dev = gt_to_xe(q->gt)->drm.dev,
+ };
struct xe_execlist_exec_queue *exl;
struct xe_device *xe = gt_to_xe(q->gt);
int err;
@@ -345,11 +359,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
exl->q = q;
- err = drm_sched_init(&exl->sched, &drm_sched_ops, NULL, 1,
- q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
- XE_SCHED_HANG_LIMIT, XE_SCHED_JOB_TIMEOUT,
- NULL, NULL, q->hwe->name,
- gt_to_xe(q->gt)->drm.dev);
+ err = drm_sched_init(&exl->sched, &args);
if (err)
goto err_free;
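
The conversion above assumes the newer DRM scheduler API, where drm_sched_init() takes a struct drm_sched_init_args rather than a long positional argument list; members left out of the initializer default to zero/NULL. A hedged sketch of the same pattern in isolation (my_ops, credit_limit and my_dev are illustrative placeholders, not names from the driver):

#include <linux/jiffies.h>
#include <linux/types.h>
#include <drm/gpu_scheduler.h>

static int example_sched_init(struct drm_gpu_scheduler *sched,
			      const struct drm_sched_backend_ops *my_ops,
			      u32 credit_limit, struct device *my_dev)
{
	const struct drm_sched_init_args args = {
		.ops = my_ops,
		.num_rqs = 1,			/* a single run queue */
		.credit_limit = credit_limit,	/* e.g. ring size / max job size */
		.hang_limit = 0,
		.timeout = msecs_to_jiffies(5000),
		.name = "example-sched",
		.dev = my_dev,
	};

	/* members not set here (submit_wq, timeout_wq, score, ...) stay
	 * NULL/0, exactly as in the execlist hunk above */
	return drm_sched_init(sched, &args);
}
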
diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c
index 7d9fc489dcb8..8a5cba22b586 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.c
+++ b/drivers/gpu/drm/xe/xe_force_wake.c
@@ -21,15 +21,25 @@ static const char *str_wake_sleep(bool wake)
return wake ? "wake" : "sleep";
}
-static void domain_init(struct xe_force_wake_domain *domain,
+static void mark_domain_initialized(struct xe_force_wake *fw,
+ enum xe_force_wake_domain_id id)
+{
+ fw->initialized_domains |= BIT(id);
+}
+
+static void init_domain(struct xe_force_wake *fw,
enum xe_force_wake_domain_id id,
struct xe_reg reg, struct xe_reg ack)
{
+ struct xe_force_wake_domain *domain = &fw->domains[id];
+
domain->id = id;
domain->reg_ctl = reg;
domain->reg_ack = ack;
domain->val = FORCEWAKE_MT(FORCEWAKE_KERNEL);
domain->mask = FORCEWAKE_MT_MASK(FORCEWAKE_KERNEL);
+
+ mark_domain_initialized(fw, id);
}
void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
@@ -39,17 +49,12 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
fw->gt = gt;
spin_lock_init(&fw->lock);
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (xe->info.graphics_verx100 >= 1270) {
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
- XE_FW_DOMAIN_ID_GT,
+ init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
FORCEWAKE_ACK_GT_MTL);
} else {
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
- XE_FW_DOMAIN_ID_GT,
+ init_domain(fw, XE_FW_DOMAIN_ID_GT,
FORCEWAKE_GT,
FORCEWAKE_ACK_GT);
}
@@ -59,12 +64,8 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
{
int i, j;
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
-
if (!xe_gt_is_media_type(gt))
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
- XE_FW_DOMAIN_ID_RENDER,
+ init_domain(fw, XE_FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER,
FORCEWAKE_ACK_RENDER);
@@ -72,8 +73,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
if (!(gt->info.engine_mask & BIT(i)))
continue;
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
- XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
+ init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
FORCEWAKE_MEDIA_VDBOX(j),
FORCEWAKE_ACK_MEDIA_VDBOX(j));
}
@@ -82,15 +82,13 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
if (!(gt->info.engine_mask & BIT(i)))
continue;
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
- XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
+ init_domain(fw, XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
FORCEWAKE_MEDIA_VEBOX(j),
FORCEWAKE_ACK_MEDIA_VEBOX(j));
}
if (gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0))
- domain_init(&fw->domains[XE_FW_DOMAIN_ID_GSC],
- XE_FW_DOMAIN_ID_GSC,
+ init_domain(fw, XE_FW_DOMAIN_ID_GSC,
FORCEWAKE_GSC,
FORCEWAKE_ACK_GSC);
}
@@ -100,7 +98,7 @@ static void __domain_ctl(struct xe_gt *gt, struct xe_force_wake_domain *domain,
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
- xe_mmio_write32(gt, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
+ xe_mmio_write32(&gt->mmio, domain->reg_ctl, domain->mask | (wake ? domain->val : 0));
}
static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain, bool wake)
@@ -111,7 +109,7 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
if (IS_SRIOV_VF(gt_to_xe(gt)))
return 0;
- ret = xe_mmio_wait32(gt, domain->reg_ack, domain->val, wake ? domain->val : 0,
+ ret = xe_mmio_wait32(&gt->mmio, domain->reg_ack, domain->val, wake ? domain->val : 0,
XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
&value, true);
if (ret)
@@ -156,52 +154,108 @@ static int domain_sleep_wait(struct xe_gt *gt,
(ffs(tmp__) - 1))) && \
domain__->reg_ctl.addr)
-int xe_force_wake_get(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains)
+/**
+ * xe_force_wake_get() - Increase the domain refcount
+ * @fw: struct xe_force_wake
+ * @domains: forcewake domains to get refcount on
+ *
+ * This function wakes up @domains if they are asleep and takes references.
+ * If the requested domain is XE_FORCEWAKE_ALL, only the applicable/initialized
+ * domains are considered for the refcount, and it is the caller's
+ * responsibility to check whether the returned ref includes a specific domain
+ * using xe_force_wake_ref_has_domain(). The caller must call
+ * xe_force_wake_put() to decrease the incremented refcounts.
+ *
+ * Return: opaque reference to the woken domains, or zero if none of the
+ * requested domains could be woken.
+ */
+unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains)
{
struct xe_gt *gt = fw->gt;
struct xe_force_wake_domain *domain;
- enum xe_force_wake_domains tmp, woken = 0;
+ unsigned int ref_incr = 0, awake_rqst = 0, awake_failed = 0;
+ unsigned int tmp, ref_rqst;
unsigned long flags;
- int ret = 0;
+ xe_gt_assert(gt, is_power_of_2(domains));
+ xe_gt_assert(gt, domains <= XE_FORCEWAKE_ALL);
+ xe_gt_assert(gt, domains == XE_FORCEWAKE_ALL || fw->initialized_domains & domains);
+
+ ref_rqst = (domains == XE_FORCEWAKE_ALL) ? fw->initialized_domains : domains;
spin_lock_irqsave(&fw->lock, flags);
- for_each_fw_domain_masked(domain, domains, fw, tmp) {
+ for_each_fw_domain_masked(domain, ref_rqst, fw, tmp) {
if (!domain->ref++) {
- woken |= BIT(domain->id);
+ awake_rqst |= BIT(domain->id);
domain_wake(gt, domain);
}
+ ref_incr |= BIT(domain->id);
}
- for_each_fw_domain_masked(domain, woken, fw, tmp) {
- ret |= domain_wake_wait(gt, domain);
+ for_each_fw_domain_masked(domain, awake_rqst, fw, tmp) {
+ if (domain_wake_wait(gt, domain) == 0) {
+ fw->awake_domains |= BIT(domain->id);
+ } else {
+ awake_failed |= BIT(domain->id);
+ --domain->ref;
+ }
}
- fw->awake_domains |= woken;
+ ref_incr &= ~awake_failed;
spin_unlock_irqrestore(&fw->lock, flags);
- return ret;
+ xe_gt_WARN(gt, awake_failed, "Forcewake domain%s %#x failed to acknowledge awake request\n",
+ str_plural(hweight_long(awake_failed)), awake_failed);
+
+ if (domains == XE_FORCEWAKE_ALL && ref_incr == fw->initialized_domains)
+ ref_incr |= XE_FORCEWAKE_ALL;
+
+ return ref_incr;
}
-int xe_force_wake_put(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains)
+/**
+ * xe_force_wake_put - Decrement the refcount and put domain to sleep if refcount becomes 0
+ * @fw: Pointer to the force wake structure
+ * @fw_ref: return value of xe_force_wake_get()
+ *
+ * This function reduces the reference counts for the domains in @fw_ref. If
+ * the refcount for any of the specified domains reaches 0, that domain is put
+ * to sleep and its sleep acknowledgment is awaited within a 50 ms timeout.
+ * Warns if a domain fails to acknowledge the sleep request in time.
+ */
+void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref)
{
struct xe_gt *gt = fw->gt;
struct xe_force_wake_domain *domain;
- enum xe_force_wake_domains tmp, sleep = 0;
+ unsigned int tmp, sleep = 0;
unsigned long flags;
- int ret = 0;
+ int ack_fail = 0;
+
+ /*
+ * Avoid an unnecessary lock/unlock when the function is called
+ * in the error path of individual domains.
+ */
+ if (!fw_ref)
+ return;
+
+ if (xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ fw_ref = fw->initialized_domains;
spin_lock_irqsave(&fw->lock, flags);
- for_each_fw_domain_masked(domain, domains, fw, tmp) {
+ for_each_fw_domain_masked(domain, fw_ref, fw, tmp) {
+ xe_gt_assert(gt, domain->ref);
+
if (!--domain->ref) {
sleep |= BIT(domain->id);
domain_sleep(gt, domain);
}
}
for_each_fw_domain_masked(domain, sleep, fw, tmp) {
- ret |= domain_sleep_wait(gt, domain);
+ if (domain_sleep_wait(gt, domain) == 0)
+ fw->awake_domains &= ~BIT(domain->id);
+ else
+ ack_fail |= BIT(domain->id);
}
- fw->awake_domains &= ~sleep;
spin_unlock_irqrestore(&fw->lock, flags);
- return ret;
+ xe_gt_WARN(gt, ack_fail, "Forcewake domain%s %#x failed to acknowledge sleep request\n",
+ str_plural(hweight_long(ack_fail)), ack_fail);
}
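
With the reworked API above, callers no longer receive an error code: they receive an opaque domain reference that must be handed back to xe_force_wake_put(), and a zero reference in an error path is a harmless no-op. A sketch of the caller pattern used throughout the rest of this series (do_work_under_forcewake() is a hypothetical stand-in for whatever needs the hardware awake):

#include <linux/errno.h>
#include "xe_force_wake.h"
#include "xe_gt.h"

static int do_work_under_forcewake(struct xe_gt *gt)
{
	return 0;	/* hypothetical work done while domains are awake */
}

static int example_with_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		/* put back whatever subset did wake up */
		xe_force_wake_put(gt_to_fw(gt), fw_ref);
		return -ETIMEDOUT;
	}

	err = do_work_under_forcewake(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return err;
}
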
diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
index a2577672f4e3..0e3e84bfa51c 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.h
+++ b/drivers/gpu/drm/xe/xe_force_wake.h
@@ -15,10 +15,9 @@ void xe_force_wake_init_gt(struct xe_gt *gt,
struct xe_force_wake *fw);
void xe_force_wake_init_engines(struct xe_gt *gt,
struct xe_force_wake *fw);
-int xe_force_wake_get(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains);
-int xe_force_wake_put(struct xe_force_wake *fw,
- enum xe_force_wake_domains domains);
+unsigned int __must_check xe_force_wake_get(struct xe_force_wake *fw,
+ enum xe_force_wake_domains domains);
+void xe_force_wake_put(struct xe_force_wake *fw, unsigned int fw_ref);
static inline int
xe_force_wake_ref(struct xe_force_wake *fw,
@@ -46,4 +45,20 @@ xe_force_wake_assert_held(struct xe_force_wake *fw,
xe_gt_assert(fw->gt, fw->awake_domains & domain);
}
+/**
+ * xe_force_wake_ref_has_domain - verify whether a domain is part of @fw_ref
+ * @fw_ref: the force_wake reference
+ * @domain: forcewake domain to verify
+ *
+ * This function confirms whether @fw_ref includes a reference to the
+ * specified @domain.
+ *
+ * Return: true if the domain is refcounted.
+ */
+static inline bool
+xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain)
+{
+ return fw_ref & domain;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h
index ed0edc2cdf9f..899fbbcb3ea9 100644
--- a/drivers/gpu/drm/xe/xe_force_wake_types.h
+++ b/drivers/gpu/drm/xe/xe_force_wake_types.h
@@ -48,7 +48,7 @@ enum xe_force_wake_domains {
XE_FW_MEDIA_VEBOX2 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX2),
XE_FW_MEDIA_VEBOX3 = BIT(XE_FW_DOMAIN_ID_MEDIA_VEBOX3),
XE_FW_GSC = BIT(XE_FW_DOMAIN_ID_GSC),
- XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT) - 1
+ XE_FORCEWAKE_ALL = BIT(XE_FW_DOMAIN_ID_COUNT)
};
/**
@@ -78,7 +78,9 @@ struct xe_force_wake {
/** @lock: protects everything force wake struct */
spinlock_t lock;
/** @awake_domains: mask of all domains awake */
- enum xe_force_wake_domains awake_domains;
+ unsigned int awake_domains;
+ /** @initialized_domains: mask of all initialized domains */
+ unsigned int initialized_domains;
/** @domains: force wake domains */
struct xe_force_wake_domain domains[XE_FW_DOMAIN_ID_COUNT];
};
diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
index 904cf47925aa..ed9183599e31 100644
--- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c
+++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c
@@ -28,10 +28,10 @@
"\n" \
"#endif\n"
-static void print_usage(FILE *f)
+static void print_usage(FILE *f, const char *progname)
{
fprintf(f, "usage: %s <input-rule-file> <generated-c-source-file> <generated-c-header-file>\n",
- program_invocation_short_name);
+ progname);
}
static void print_parse_error(const char *err_msg, const char *line,
@@ -144,7 +144,7 @@ int main(int argc, const char *argv[])
if (argc < 3) {
fprintf(stderr, "ERROR: wrong arguments\n");
- print_usage(stderr);
+ print_usage(stderr, argv[0]);
return 1;
}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 2895f154654c..2c799958c1e4 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,6 +5,7 @@
#include "xe_ggtt.h"
+#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
@@ -107,8 +108,10 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
- struct xe_gt *gt = XE_WA(ggtt->tile->primary_gt, 22019338487) ? ggtt->tile->primary_gt :
- ggtt->tile->media_gt;
+ struct xe_tile *tile = ggtt->tile;
+ struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
+ tile->primary_gt : tile->media_gt;
+ struct xe_mmio *mmio = &affected_gt->mmio;
u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
/*
* Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
@@ -118,7 +121,7 @@ static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
lockdep_assert_held(&ggtt->lock);
if ((++ggtt->access_count % max_gtt_writes) == 0) {
- xe_mmio_write32(gt, GMD_ID, 0x0);
+ xe_mmio_write32(mmio, GMD_ID, 0x0);
ggtt->access_count = 0;
}
}
@@ -198,6 +201,13 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
+static void dev_fini_ggtt(void *arg)
+{
+ struct xe_ggtt *ggtt = arg;
+
+ drain_workqueue(ggtt->wq);
+}
+
/**
* xe_ggtt_init_early - Early GGTT initialization
* @ggtt: the &xe_ggtt to be initialized
@@ -243,7 +253,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
else
ggtt->pt_ops = &xelp_pt_ops;
- ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, 0);
+ ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
ggtt->size - xe_wopcm_size(xe));
@@ -254,6 +264,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
if (err)
return err;
+ err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
+ if (err)
+ return err;
+
if (IS_SRIOV_VF(xe)) {
err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
if (err)
@@ -262,6 +276,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_ggtt_init_early, ERRNO); /* See xe_pci_probe() */
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt);
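
The GGTT teardown above relies on devm_add_action_or_reset(): the callback is tied to the struct device's lifetime, runs automatically on driver unbind, and is invoked immediately (with the error propagated) if registration fails. A small hedged sketch of that pattern, independent of xe (my_state and my_fini are illustrative names):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_state {
	struct workqueue_struct *wq;
};

static void my_fini(void *arg)
{
	struct my_state *state = arg;

	/* flush pending work before the device goes away */
	drain_workqueue(state->wq);
	destroy_workqueue(state->wq);
}

static int my_init(struct device *dev, struct my_state *state)
{
	state->wq = alloc_workqueue("my-wq", 0, WQ_MEM_RECLAIM);
	if (!state->wq)
		return -ENOMEM;

	/* on registration failure my_fini() runs right away, so nothing leaks */
	return devm_add_action_or_reset(dev, my_fini, state);
}
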
@@ -358,10 +373,10 @@ int xe_ggtt_init(struct xe_ggtt *ggtt)
/*
* So we don't need to worry about 64K GGTT layout when dealing with
- * scratch entires, rather keep the scratch page in system memory on
+ * scratch entries, rather keep the scratch page in system memory on
* platforms where 64K pages are needed for VRAM.
*/
- flags = XE_BO_FLAG_PINNED;
+ flags = 0;
if (ggtt->flags & XE_GGTT_FLAGS_64K)
flags |= XE_BO_FLAG_SYSTEM;
else
@@ -397,6 +412,16 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
{
+ struct xe_device *xe = tile_to_xe(ggtt->tile);
+
+ /*
+ * XXX: Barrier for GGTT pages. Unsure exactly why this is required, but
+ * without it LNL has issues with the GuC reading the scratch page instead
+ * of the correct GGTT page. Not a particularly hot code path, so blindly
+ * do an MMIO read here, which makes the GuC read the correct GGTT page.
+ */
+ xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);
+
/* Each GT in a tile has its own TLB to cache GGTT lookups */
ggtt_invalidate_gt_tlb(ggtt->tile->primary_gt);
ggtt_invalidate_gt_tlb(ggtt->tile->media_gt);
@@ -584,10 +609,10 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
u64 start;
u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node))
+ if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
return;
- start = bo->ggtt_node->base.start;
+ start = bo->ggtt_node[ggtt->tile->id]->base.start;
for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
@@ -598,15 +623,16 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
+ u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
+ u8 tile_id = ggtt->tile->id;
int err;
- u64 alignment = XE_PAGE_SIZE;
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
alignment = SZ_64K;
- if (XE_WARN_ON(bo->ggtt_node)) {
+ if (XE_WARN_ON(bo->ggtt_node[tile_id])) {
/* Someone's already inserted this BO in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
return 0;
}
@@ -616,19 +642,19 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
- bo->ggtt_node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(bo->ggtt_node)) {
- err = PTR_ERR(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(bo->ggtt_node[tile_id])) {
+ err = PTR_ERR(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
goto out;
}
mutex_lock(&ggtt->lock);
- err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size,
- alignment, 0, start, end, 0);
+ err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base,
+ bo->size, alignment, 0, start, end, 0);
if (err) {
- xe_ggtt_node_fini(bo->ggtt_node);
- bo->ggtt_node = NULL;
+ xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
+ bo->ggtt_node[tile_id] = NULL;
} else {
xe_ggtt_map_bo(ggtt, bo);
}
@@ -677,13 +703,15 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
*/
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
{
- if (XE_WARN_ON(!bo->ggtt_node))
+ u8 tile_id = ggtt->tile->id;
+
+ if (XE_WARN_ON(!bo->ggtt_node[tile_id]))
return;
/* This BO is not currently in the GGTT */
- xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size);
+ xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size);
- xe_ggtt_node_remove(bo->ggtt_node,
+ xe_ggtt_node_remove(bo->ggtt_node[tile_id],
bo->flags & XE_BO_FLAG_GGTT_INVALIDATE);
}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 50361b4638f9..869b43a4151d 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -63,13 +63,24 @@ int xe_sched_init(struct xe_gpu_scheduler *sched,
atomic_t *score, const char *name,
struct device *dev)
{
+ const struct drm_sched_init_args args = {
+ .ops = ops,
+ .submit_wq = submit_wq,
+ .num_rqs = 1,
+ .credit_limit = hw_submission,
+ .hang_limit = hang_limit,
+ .timeout = timeout,
+ .timeout_wq = timeout_wq,
+ .score = score,
+ .name = name,
+ .dev = dev,
+ };
+
sched->ops = xe_ops;
INIT_LIST_HEAD(&sched->msgs);
INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);
- return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
- hang_limit, timeout, timeout_wq, score, name,
- dev);
+ return drm_sched_init(&sched->base, &args);
}
void xe_sched_fini(struct xe_gpu_scheduler *sched)
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 64b2ae6839db..308061f0cf37 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -51,7 +51,15 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
- drm_sched_resubmit_jobs(&sched->base);
+ struct drm_sched_job *s_job;
+
+ list_for_each_entry(s_job, &sched->base.pending_list, list) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+ struct dma_fence *hw_fence = s_fence->parent;
+
+ if (hw_fence && !dma_fence_is_signaled(hw_fence))
+ sched->base.ops->run_job(s_job);
+ }
}
static inline bool
@@ -71,8 +79,14 @@ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- return list_first_entry_or_null(&sched->base.pending_list,
- struct xe_sched_job, drm.list);
+ struct xe_sched_job *job;
+
+ spin_lock(&sched->base.job_list_lock);
+ job = list_first_entry_or_null(&sched->base.pending_list,
+ struct xe_sched_job, drm.list);
+ spin_unlock(&sched->base.job_list_lock);
+
+ return job;
}
static inline int
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 6fbea70d3d36..0bcf97063ff6 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -34,6 +34,7 @@
#include "instructions/xe_gsc_commands.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_irq_regs.h"
static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
@@ -179,7 +180,7 @@ out_bo:
static int gsc_fw_is_loaded(struct xe_gt *gt)
{
- return xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
+ return xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)) &
HECI1_FWSTS1_INIT_COMPLETE;
}
@@ -190,7 +191,7 @@ static int gsc_fw_wait(struct xe_gt *gt)
* executed by the GSCCS. To account for possible submission delays or
* other issues, we use a 500ms timeout in the wait here.
*/
- return xe_mmio_wait32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
HECI1_FWSTS1_INIT_COMPLETE,
HECI1_FWSTS1_INIT_COMPLETE,
500 * USEC_PER_MSEC, NULL, false);
@@ -260,19 +261,17 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_tile *tile = gt_to_tile(gt);
+ unsigned int fw_ref;
int ret;
if (XE_WA(tile->primary_gt, 14018094691)) {
- ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+ fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
/*
* If the forcewake fails we want to keep going, because the worst
* case outcome in failing to apply the WA is that PXP won't work,
- * which is not fatal. We still throw a warning so the issue is
- * seen if it happens.
+ * which is not fatal. Forcewake get warns implicitly in case of failure.
*/
- xe_gt_WARN_ON(tile->primary_gt, ret);
-
xe_gt_mcr_multicast_write(tile->primary_gt,
EU_SYSTOLIC_LIC_THROTTLE_CTL_WITH_LOCK,
EU_SYSTOLIC_LIC_THROTTLE_CTL_LOCK_BIT);
@@ -281,7 +280,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
ret = gsc_upload(gsc);
if (XE_WA(tile->primary_gt, 14018094691))
- xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
if (ret)
return ret;
@@ -330,7 +329,7 @@ static int gsc_er_complete(struct xe_gt *gt)
* so in that scenario we're always guaranteed to find the correct
* value.
*/
- er_status = xe_mmio_read32(gt, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;
+ er_status = xe_mmio_read32(&gt->mmio, GSCI_TIMER_STATUS) & GSCI_TIMER_STATUS_VALUE;
if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
/*
@@ -351,6 +350,7 @@ static void gsc_work(struct work_struct *work)
struct xe_gsc *gsc = container_of(work, typeof(*gsc), work);
struct xe_gt *gt = gsc_to_gt(gsc);
struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
u32 actions;
int ret;
@@ -360,7 +360,7 @@ static void gsc_work(struct work_struct *work)
spin_unlock_irq(&gsc->lock);
xe_pm_runtime_get(xe);
- xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
if (actions & GSC_ACTION_ER_COMPLETE) {
ret = gsc_er_complete(gt);
@@ -380,7 +380,7 @@ static void gsc_work(struct work_struct *work)
xe_gsc_proxy_request_handler(gsc);
out:
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
}
@@ -555,13 +555,26 @@ void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc)
flush_work(&gsc->work);
}
-/**
- * xe_gsc_remove() - Clean up the GSC structures before driver removal
- * @gsc: the GSC uC
- */
-void xe_gsc_remove(struct xe_gsc *gsc)
+void xe_gsc_stop_prepare(struct xe_gsc *gsc)
{
- xe_gsc_proxy_remove(gsc);
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int ret;
+
+ if (!xe_uc_fw_is_loadable(&gsc->fw) || xe_uc_fw_is_in_error_state(&gsc->fw))
+ return;
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GSC);
+
+ /*
+ * If the GSC FW load or the proxy init is interrupted, the only way
+ * to recover is to do an FLR and reload the GSC from scratch.
+ * Therefore, let's wait for the init to complete before stopping
+ * operations. The proxy init is the last step, so we can just wait on
+ * that.
+ */
+ ret = xe_gsc_wait_for_proxy_init_done(gsc);
+ if (ret)
+ xe_gt_err(gt, "failed to wait for GSC init completion before uc stop\n");
}
/*
@@ -581,11 +594,11 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
return;
- xe_mmio_rmw32(gt, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
+ xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
if (prep) {
/* make sure the reset bit is clear when writing the CSR reg */
- xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE),
+ xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_RST, HECI_H_CSR_IG);
msleep(200);
}
@@ -599,7 +612,8 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
{
struct xe_gt *gt = gsc_to_gt(gsc);
- int err;
+ struct xe_mmio *mmio = &gt->mmio;
+ unsigned int fw_ref;
xe_uc_fw_print(&gsc->fw, p);
@@ -608,17 +622,17 @@ void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
if (!xe_uc_fw_is_enabled(&gsc->fw))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (!fw_ref)
return;
drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
- xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
- xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
-
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+ xe_mmio_read32(mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(mmio, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index e282b9ef6ec4..b8b8e0810ad9 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -16,8 +16,8 @@ struct xe_hw_engine;
int xe_gsc_init(struct xe_gsc *gsc);
int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc);
void xe_gsc_wait_for_worker_completion(struct xe_gsc *gsc);
+void xe_gsc_stop_prepare(struct xe_gsc *gsc);
void xe_gsc_load_start(struct xe_gsc *gsc);
-void xe_gsc_remove(struct xe_gsc *gsc);
void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
index 2d6ea8c01445..d0519cd6704a 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
@@ -65,12 +65,23 @@ gsc_to_gt(struct xe_gsc *gsc)
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
- u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
+ u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE));
return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
HECI1_FWSTS1_PROXY_STATE_NORMAL;
}
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+
+ /* Proxy init can take up to 500ms, so wait double that for safety */
+ return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
+ HECI1_FWSTS1_CURRENT_STATE,
+ HECI1_FWSTS1_PROXY_STATE_NORMAL,
+ USEC_PER_SEC, NULL, false);
+}
+
static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
struct xe_gt *gt = gsc_to_gt(gsc);
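
xe_gsc_wait_for_proxy_init_done() above leans on xe_mmio_wait32() to poll a register field: the read value is masked and compared against the expected value until the timeout expires. A standalone sketch of that polling logic (the fake register read is obviously not the driver's MMIO access, and the state codes are made up):

#include <stdint.h>
#include <stdio.h>

/* pretend the firmware flips into the "normal" proxy state after a while */
static uint32_t fake_read_fwsts1(unsigned int elapsed_us)
{
	return elapsed_us > 300000 ? 0x5 : 0x2;	/* hypothetical state codes */
}

static int wait_for_field(uint32_t mask, uint32_t value, unsigned int timeout_us)
{
	for (unsigned int t = 0; t <= timeout_us; t += 10) {
		if ((fake_read_fwsts1(t) & mask) == value)
			return 0;
		/* real code sleeps/udelays here instead of busy-looping */
	}
	return -110;	/* -ETIMEDOUT */
}

int main(void)
{
	printf("wait result: %d\n", wait_for_field(0xf, 0x5, 1000000));
	return 0;
}
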
@@ -78,7 +89,7 @@ static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
/* make sure we never accidentally write the RST bit */
clr |= HECI_H_CSR_RST;
- xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
+ xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
}
static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
@@ -139,17 +150,29 @@ static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
return 0;
}
-static int validate_proxy_header(struct xe_gsc_proxy_header *header,
+static int validate_proxy_header(struct xe_gt *gt,
+ struct xe_gsc_proxy_header *header,
u32 source, u32 dest, u32 max_size)
{
u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
+ int ret = 0;
- if (header->destination != dest || header->source != source)
- return -ENOEXEC;
+ if (header->destination != dest || header->source != source) {
+ ret = -ENOEXEC;
+ goto out;
+ }
+
+ if (length + PROXY_HDR_SIZE > max_size) {
+ ret = -E2BIG;
+ goto out;
+ }
- if (length + PROXY_HDR_SIZE > max_size)
- return -E2BIG;
+ /* We only care about the status if this is a message for the driver */
+ if (dest == GSC_PROXY_ADDRESSING_KMD && header->status != 0) {
+ ret = -EIO;
+ goto out;
+ }
switch (type) {
case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
@@ -157,12 +180,20 @@ static int validate_proxy_header(struct xe_gsc_proxy_header *header,
break;
fallthrough;
case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
- return -EIO;
+ ret = -EIO;
+ break;
default:
break;
}
- return 0;
+out:
+ if (ret)
+ xe_gt_err(gt,
+ "GSC proxy error: s=0x%x[0x%x], d=0x%x[0x%x], t=%u, l=0x%x, st=0x%x\n",
+ header->source, source, header->destination, dest,
+ type, length, header->status);
+
+ return ret;
}
#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
@@ -228,12 +259,17 @@ static int proxy_query(struct xe_gsc *gsc)
xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
reply_offset, PROXY_HDR_SIZE);
- /* stop if this was the last message */
- if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
+ /* Check the status and stop if this was the last message */
+ if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END) {
+ ret = validate_proxy_header(gt, to_csme_hdr,
+ GSC_PROXY_ADDRESSING_GSC,
+ GSC_PROXY_ADDRESSING_KMD,
+ GSC_PROXY_BUFFER_SIZE - reply_offset);
break;
+ }
/* make sure the GSC-to-CSME proxy header is sane */
- ret = validate_proxy_header(to_csme_hdr,
+ ret = validate_proxy_header(gt, to_csme_hdr,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_BUFFER_SIZE - reply_offset);
@@ -262,7 +298,7 @@ static int proxy_query(struct xe_gsc *gsc)
}
/* make sure the CSME-to-GSC proxy header is sane */
- ret = validate_proxy_header(gsc->proxy.from_csme,
+ ret = validate_proxy_header(gt, gsc->proxy.from_csme,
GSC_PROXY_ADDRESSING_CSME,
GSC_PROXY_ADDRESSING_GSC,
GSC_PROXY_BUFFER_SIZE - reply_offset);
@@ -398,6 +434,34 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
return 0;
}
+static void xe_gsc_proxy_remove(void *arg)
+{
+ struct xe_gsc *gsc = arg;
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref = 0;
+
+ if (!gsc->proxy.component_added)
+ return;
+
+ /* disable HECI2 IRQs */
+ xe_pm_runtime_get(xe);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (!fw_ref)
+ xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
+
+	/* try to disable the irq even if forcewake failed */
+ gsc_proxy_irq_toggle(gsc, false);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_pm_runtime_put(xe);
+
+ xe_gsc_wait_for_worker_completion(gsc);
+
+ component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
+ gsc->proxy.component_added = false;
+}
+
/**
* xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
* @gsc: the GSC uC
@@ -437,41 +501,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
gsc->proxy.component_added = true;
- /* the component must be removed before unload, so can't use drmm for cleanup */
-
- return 0;
-}
-
-/**
- * xe_gsc_proxy_remove() - remove the GSC proxy MEI component
- * @gsc: the GSC uC
- */
-void xe_gsc_proxy_remove(struct xe_gsc *gsc)
-{
- struct xe_gt *gt = gsc_to_gt(gsc);
- struct xe_device *xe = gt_to_xe(gt);
- int err = 0;
-
- if (!gsc->proxy.component_added)
- return;
-
- /* disable HECI2 IRQs */
- xe_pm_runtime_get(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
- if (err)
- xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");
-
- /* try do disable irq even if forcewake failed */
- gsc_proxy_irq_toggle(gsc, false);
-
- if (!err)
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
- xe_pm_runtime_put(xe);
-
- xe_gsc_wait_for_worker_completion(gsc);
-
- component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
- gsc->proxy.component_added = false;
+ return devm_add_action_or_reset(xe->drm.dev, xe_gsc_proxy_remove, gsc);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.h b/drivers/gpu/drm/xe/xe_gsc_proxy.h
index c511ade6b863..765602221dbc 100644
--- a/drivers/gpu/drm/xe/xe_gsc_proxy.h
+++ b/drivers/gpu/drm/xe/xe_gsc_proxy.h
@@ -12,7 +12,7 @@ struct xe_gsc;
int xe_gsc_proxy_init(struct xe_gsc *gsc);
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc);
-void xe_gsc_proxy_remove(struct xe_gsc *gsc);
+int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc);
int xe_gsc_proxy_start(struct xe_gsc *gsc);
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc);
diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
index 5926de20214c..97c056656df0 100644
--- a/drivers/gpu/drm/xe/xe_gsc_types.h
+++ b/drivers/gpu/drm/xe/xe_gsc_types.h
@@ -13,6 +13,7 @@
#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
+#include "xe_device_types.h"
struct xe_bo;
struct xe_exec_queue;
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index d5fd6a089b7c..6c4cb9576fb6 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -12,13 +12,16 @@
#include <generated/xe_wa_oob.h>
+#include "instructions/xe_alu_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
+#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
@@ -32,6 +35,7 @@
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
@@ -77,7 +81,8 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
return ERR_PTR(-ENOMEM);
gt->tile = tile;
- gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
+ gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
+ WQ_MEM_RECLAIM);
err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
if (err)
@@ -97,14 +102,14 @@ void xe_gt_sanitize(struct xe_gt *gt)
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
+ unsigned int fw_ref;
u32 reg;
- int err;
if (!XE_WA(gt, 16023588340))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (WARN_ON(err))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
if (!xe_gt_is_media_type(gt)) {
@@ -113,14 +118,14 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
- xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
+ unsigned int fw_ref;
u32 reg;
- int err;
if (!XE_WA(gt, 16023588340))
return;
@@ -128,35 +133,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
if (xe_gt_is_media_type(gt))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (WARN_ON(err))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
reg &= ~CG_DIS_CNTLBUS;
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-}
-
-/**
- * xe_gt_remove() - Clean up the GT structures before driver removal
- * @gt: the GT object
- *
- * This function should only act on objects/structures that must be cleaned
- * before the driver removal callback is complete and therefore can't be
- * deferred to a drmm action.
- */
-void xe_gt_remove(struct xe_gt *gt)
-{
- int i;
-
- xe_uc_remove(&gt->uc);
-
- for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
- xe_hw_fence_irq_finish(&gt->fence_irq[i]);
-
- xe_gt_disable_host_l2_vram(gt);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static void gt_reset_worker(struct work_struct *w);
@@ -193,15 +178,6 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
return 0;
}
-/*
- * Convert back from encoded value to type-safe, only to be used when reg.mcr
- * is true
- */
-static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
-{
- return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
-}
-
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
struct xe_reg_sr *sr = &q->hwe->reg_lrc;
@@ -211,6 +187,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
struct xe_bb *bb;
struct dma_fence *fence;
long timeout;
+ int count_rmw = 0;
int count = 0;
if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
@@ -223,30 +200,32 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
if (IS_ERR(bb))
return PTR_ERR(bb);
- xa_for_each(&sr->xa, idx, entry)
- ++count;
+ /* count RMW registers as those will be handled separately */
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ ++count;
+ else
+ ++count_rmw;
+ }
- if (count) {
+ if (count || count_rmw)
xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
+ if (count) {
+		/* emit a single LRI with all non-RMW regs */
+
bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
xa_for_each(&sr->xa, idx, entry) {
struct xe_reg reg = entry->reg;
- struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
u32 val;
- /*
- * Skip reading the register if it's not really needed
- */
if (reg.masked)
val = entry->clr_bits << 16;
- else if (entry->clr_bits + 1)
- val = (reg.mcr ?
- xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
- else
+ else if (entry->clr_bits == ~0)
val = 0;
+ else
+ continue;
val |= entry->set_bits;
@@ -256,6 +235,52 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
}
}
+ if (count_rmw) {
+ /* emit MI_MATH for each RMW reg */
+
+ xa_for_each(&sr->xa, idx, entry) {
+ if (entry->reg.masked || entry->clr_bits == ~0)
+ continue;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
+ bb->cs[bb->len++] = entry->reg.addr;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = entry->clr_bits;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = entry->set_bits;
+
+ bb->cs[bb->len++] = MI_MATH(8);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1);
+ bb->cs[bb->len++] = CS_ALU_INSTR_AND;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
+ bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2);
+ bb->cs[bb->len++] = CS_ALU_INSTR_OR;
+ bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
+
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = entry->reg.addr;
+
+ xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
+ entry->reg.addr, entry->clr_bits, entry->set_bits);
+ }
+
+ /* reset used GPR */
+ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
+ bb->cs[bb->len++] = 0;
+ bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
+ bb->cs[bb->len++] = 0;
+ }
+
xe_lrc_emit_hwe_state_instructions(q, bb);
job = xe_bb_create_job(q, bb);
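
For reference, the MI_MATH sequence emitted above performs in the command streamer what a CPU-side read-modify-write would do: load the register into a GPR, AND with the inverted clear mask, OR in the set bits, then write the result back. A standalone arithmetic sketch of that operation (the register contents and masks are made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t rmw(uint32_t old, uint32_t clr_bits, uint32_t set_bits)
{
	return (old & ~clr_bits) | set_bits;
}

int main(void)
{
	/* hypothetical register value and save-restore entry */
	uint32_t reg = 0x0000f00f, clr = 0x000000ff, set = 0x00000041;

	printf("0x%08x -> 0x%08x (clr 0x%08x, set 0x%08x)\n",
	       reg, rmw(reg, clr, set), clr, set);
	return 0;
}
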
@@ -379,13 +404,19 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_gt(gt);
+ err = xe_tuning_init(gt);
+ if (err)
+ return err;
+
xe_wa_process_oob(gt);
- xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
+ err = xe_gt_tlb_invalidation_init_early(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -402,11 +433,12 @@ static void dump_pat_on_error(struct xe_gt *gt)
static int gt_fw_domain_init(struct xe_gt *gt)
{
- int err, i;
+ unsigned int fw_ref;
+ int err;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_hw_fence_irq;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
if (!xe_gt_is_media_type(gt)) {
err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
@@ -439,32 +471,32 @@ static int gt_fw_domain_init(struct xe_gt *gt)
* Stash hardware-reported version. Since this register does not exist
* on pre-MTL platforms, reading it there will (correctly) return 0.
*/
- gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);
-
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
- XE_WARN_ON(err);
+ gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
err_force_wake:
dump_pat_on_error(gt);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_hw_fence_irq:
- for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
- xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return err;
}
static int all_fw_domain_init(struct xe_gt *gt)
{
- int err, i;
+ unsigned int fw_ref;
+ int err;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
- goto err_hw_fence_irq;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ err = -ETIMEDOUT;
+ goto err_force_wake;
+ }
xe_gt_mcr_set_implicit_defaults(gt);
+ xe_wa_process_gt(gt);
+ xe_tuning_process_gt(gt);
xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
err = xe_gt_clock_init(gt);
@@ -523,19 +555,17 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt)))
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ xe_gt_sriov_pf_init(gt);
xe_gt_sriov_pf_init_hw(gt);
+ }
- err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
err_force_wake:
- xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-err_hw_fence_irq:
- for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
- xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return err;
}
@@ -546,11 +576,12 @@ err_hw_fence_irq:
*/
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto out;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
xe_gt_mcr_init_early(gt);
xe_pat_init(gt);
@@ -568,11 +599,21 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
xe_gt_enable_host_l2_vram(gt);
out_fw:
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-out:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return err;
}
+static void xe_gt_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+ int i;
+
+ for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
+ xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+
+ xe_gt_disable_host_l2_vram(gt);
+}
+
int xe_gt_init(struct xe_gt *gt)
{
int err;
@@ -585,7 +626,7 @@ int xe_gt_init(struct xe_gt *gt)
xe_hw_fence_irq_init(&gt->fence_irq[i]);
}
- err = xe_gt_tlb_invalidation_init(gt);
+ err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
if (err)
return err;
@@ -619,9 +660,39 @@ int xe_gt_init(struct xe_gt *gt)
xe_gt_record_user_engines(gt);
+ err = xe_eu_stall_init(gt);
+ if (err)
+ return err;
+
return 0;
}
+/**
+ * xe_gt_mmio_init() - Initialize GT's MMIO access
+ * @gt: the GT object
+ *
+ * Initialize GT's MMIO accessor, which will be used to access registers inside
+ * this GT.
+ */
+void xe_gt_mmio_init(struct xe_gt *gt)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+
+ xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);
+
+ if (gt->info.type == XE_GT_TYPE_MEDIA) {
+ gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
+ gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
+ } else {
+ gt->mmio.adj_offset = 0;
+ gt->mmio.adj_limit = 0;
+ }
+
+ if (IS_SRIOV_VF(xe))
+ gt->mmio.sriov_vf_gt = gt;
+}
+
void xe_gt_record_user_engines(struct xe_gt *gt)
{
struct xe_hw_engine *hwe;
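
The adj_offset/adj_limit fields set up in xe_gt_mmio_init() above follow the usual media-GT GSI remapping convention (an assumption here, based on how such accessors typically apply them): register offsets below the limit are shifted by the offset, so media-GT code can reuse primary-GT register definitions. A standalone sketch (the offset/length constants are assumed, not taken from this hunk):

#include <stdint.h>
#include <stdio.h>

static uint32_t adjusted_addr(uint32_t addr, uint32_t adj_offset, uint32_t adj_limit)
{
	return addr < adj_limit ? addr + adj_offset : addr;
}

int main(void)
{
	const uint32_t gsi_offset = 0x380000;	/* MEDIA_GT_GSI_OFFSET (assumed) */
	const uint32_t gsi_length = 0x400000;	/* MEDIA_GT_GSI_LENGTH (assumed) */

	/* a low GT register gets remapped for the media GT ... */
	printf("0x%06x -> 0x%06x\n", 0x2358u,
	       adjusted_addr(0x2358, gsi_offset, gsi_length));
	/* ... while an address past the limit is left alone */
	printf("0x%06x -> 0x%06x\n", 0x400000u,
	       adjusted_addr(0x400000, gsi_offset, gsi_length));
	return 0;
}
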
@@ -647,10 +718,13 @@ static int do_gt_reset(struct xe_gt *gt)
{
int err;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return xe_gt_sriov_vf_reset(gt);
+
xe_gsc_wa_14015076503(gt, true);
- xe_mmio_write32(gt, GDRST, GRDOM_FULL);
- err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
+ xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
+ err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
if (err)
xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
ERR_PTR(err));
@@ -721,10 +795,8 @@ static int do_gt_restart(struct xe_gt *gt)
if (err)
return err;
- for_each_hw_engine(hwe, gt, id) {
+ for_each_hw_engine(hwe, gt, id)
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
- }
/* Get CCS mode in sync between sw/hw */
xe_gt_apply_ccs_mode(gt);
@@ -740,6 +812,7 @@ static int do_gt_restart(struct xe_gt *gt)
static int gt_reset(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
if (xe_device_wedged(gt_to_xe(gt)))
@@ -760,9 +833,11 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_sanitize(gt);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
- goto err_msg;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ err = -ETIMEDOUT;
+ goto err_out;
+ }
xe_uc_gucrc_disable(&gt->uc);
xe_uc_stop_prepare(&gt->uc);
@@ -780,8 +855,7 @@ static int gt_reset(struct xe_gt *gt)
if (err)
goto err_out;
- err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(gt_to_xe(gt));
xe_gt_info(gt, "reset done\n");
@@ -789,8 +863,7 @@ static int gt_reset(struct xe_gt *gt)
return 0;
err_out:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-err_msg:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
XE_WARN_ON(xe_uc_start(&gt->uc));
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
@@ -810,7 +883,7 @@ static void gt_reset_worker(struct work_struct *w)
void xe_gt_reset_async(struct xe_gt *gt)
{
- xe_gt_info(gt, "trying reset\n");
+ xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));
/* Don't do a reset while one is already in flight */
if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
@@ -822,22 +895,25 @@ void xe_gt_reset_async(struct xe_gt *gt)
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ unsigned int fw_ref;
- xe_uc_stop_prepare(&gt->uc);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_uc_suspend_prepare(&gt->uc);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
int xe_gt_suspend(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
xe_gt_dbg(gt, "suspending\n");
xe_gt_sanitize(gt);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
goto err_msg;
err = xe_uc_suspend(&gt->uc);
@@ -848,19 +924,29 @@ int xe_gt_suspend(struct xe_gt *gt)
xe_gt_disable_host_l2_vram(gt);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_dbg(gt, "suspended\n");
return 0;
-err_force_wake:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
+ err = -ETIMEDOUT;
+err_force_wake:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
return err;
}
+void xe_gt_shutdown(struct xe_gt *gt)
+{
+ unsigned int fw_ref;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ do_gt_reset(gt);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+}
+
/**
* xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
* @gt: the GT object
@@ -883,11 +969,12 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
int xe_gt_resume(struct xe_gt *gt)
{
+ unsigned int fw_ref;
int err;
xe_gt_dbg(gt, "resuming\n");
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
goto err_msg;
err = do_gt_restart(gt);
@@ -896,14 +983,15 @@ int xe_gt_resume(struct xe_gt *gt)
xe_gt_idle_enable_pg(gt);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_dbg(gt, "resumed\n");
return 0;
-err_force_wake:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
+ err = -ETIMEDOUT;
+err_force_wake:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index ee138e9768a2..187fa6490eaf 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -31,12 +31,13 @@ struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
int xe_gt_init_hwconfig(struct xe_gt *gt);
int xe_gt_init_early(struct xe_gt *gt);
int xe_gt_init(struct xe_gt *gt);
+void xe_gt_mmio_init(struct xe_gt *gt);
void xe_gt_declare_wedged(struct xe_gt *gt);
int xe_gt_record_default_lrcs(struct xe_gt *gt);
/**
* xe_gt_record_user_engines - save data related to engines available to
- * usersapce
+ * userspace
* @gt: GT structure
*
* Walk the available HW engines from gt->info.engine_mask and calculate data
@@ -48,11 +49,36 @@ void xe_gt_record_user_engines(struct xe_gt *gt);
void xe_gt_suspend_prepare(struct xe_gt *gt);
int xe_gt_suspend(struct xe_gt *gt);
+void xe_gt_shutdown(struct xe_gt *gt);
int xe_gt_resume(struct xe_gt *gt);
void xe_gt_reset_async(struct xe_gt *gt);
void xe_gt_sanitize(struct xe_gt *gt);
int xe_gt_sanitize_freq(struct xe_gt *gt);
-void xe_gt_remove(struct xe_gt *gt);
+
+/**
+ * xe_gt_wait_for_reset - wait for gt's async reset to finalize.
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the work to finish execution,
+ * %false if there was no scheduled reset or it was done.
+ */
+static inline bool xe_gt_wait_for_reset(struct xe_gt *gt)
+{
+ return flush_work(&gt->reset.worker);
+}
+
+/**
+ * xe_gt_reset - perform synchronous reset
+ * @gt: GT structure
+ * Return:
+ * %true if it waited for the reset to finish,
+ * %false if there was no scheduled reset.
+ */
+static inline bool xe_gt_reset(struct xe_gt *gt)
+{
+ xe_gt_reset_async(gt);
+ return xe_gt_wait_for_reset(gt);
+}
/**
* xe_gt_any_hw_engine_by_reset_domain - scan the list of engines and return the
diff --git a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
index d2e4dc3aaf61..50fffc9ebf62 100644
--- a/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
+++ b/drivers/gpu/drm/xe/xe_gt_ccs_mode.c
@@ -68,7 +68,13 @@ static void __xe_gt_apply_ccs_mode(struct xe_gt *gt, u32 num_engines)
}
}
- xe_mmio_write32(gt, CCS_MODE, mode);
+ /*
+	 * Mask bits need to be set for the register write. Although only Xe2+
+	 * platforms require the mask bits, setting them does no harm on older
+	 * platforms, where these bits are unused.
+ */
+ mode |= CCS_MODE_CSLICE_0_3_MASK << 16;
+ xe_mmio_write32(&gt->mmio, CCS_MODE, mode);
xe_gt_dbg(gt, "CCS_MODE=%x config:%08x, num_engines:%d, num_slices:%d\n",
mode, config, num_engines, num_slices);
@@ -133,9 +139,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
}
/* CCS mode can only be updated when there are no drm clients */
- spin_lock(&xe->clients.lock);
- if (xe->clients.count) {
- spin_unlock(&xe->clients.lock);
+ mutex_lock(&xe->drm.filelist_mutex);
+ if (!list_empty(&xe->drm.filelist)) {
+ mutex_unlock(&xe->drm.filelist_mutex);
+ xe_gt_dbg(gt, "Rejecting compute mode change as there are active drm clients\n");
return -EBUSY;
}
@@ -143,10 +150,10 @@ ccs_mode_store(struct device *kdev, struct device_attribute *attr,
xe_gt_info(gt, "Setting compute mode to %d\n", num_engines);
gt->ccs_mode = num_engines;
xe_gt_record_user_engines(gt);
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
}
- spin_unlock(&xe->clients.lock);
+ mutex_unlock(&xe->drm.filelist_mutex);
return count;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index 86c2d62b4bdc..4f011d1573c6 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -12,71 +12,82 @@
#include "xe_assert.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_macros.h"
#include "xe_mmio.h"
-static u32 read_reference_ts_freq(struct xe_gt *gt)
-{
- u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE);
- u32 base_freq, frac_freq;
-
- base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK,
- ts_override) + 1;
- base_freq *= 1000000;
-
- frac_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK,
- ts_override);
- frac_freq = 1000000 / (frac_freq + 1);
-
- return base_freq + frac_freq;
-}
+#define f19_2_mhz 19200000
+#define f24_mhz 24000000
+#define f25_mhz 25000000
+#define f38_4_mhz 38400000
+#define ts_base_83 83333
+#define ts_base_52 52083
+#define ts_base_80 80000
-static u32 get_crystal_clock_freq(u32 rpm_config_reg)
+static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq,
+ u32 *timestamp_base)
{
- const u32 f19_2_mhz = 19200000;
- const u32 f24_mhz = 24000000;
- const u32 f25_mhz = 25000000;
- const u32 f38_4_mhz = 38400000;
u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK,
rpm_config_reg);
switch (crystal_clock) {
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
- return f24_mhz;
+ *freq = f24_mhz;
+ *timestamp_base = ts_base_83;
+ return;
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
- return f19_2_mhz;
+ *freq = f19_2_mhz;
+ *timestamp_base = ts_base_52;
+ return;
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
- return f38_4_mhz;
+ *freq = f38_4_mhz;
+ *timestamp_base = ts_base_52;
+ return;
case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
- return f25_mhz;
+ *freq = f25_mhz;
+ *timestamp_base = ts_base_80;
+ return;
default:
- XE_WARN_ON("NOT_POSSIBLE");
- return 0;
+ xe_gt_warn(gt, "Invalid crystal clock frequency: %u", crystal_clock);
+ *freq = 0;
+ *timestamp_base = 0;
+ return;
}
}
-int xe_gt_clock_init(struct xe_gt *gt)
+static void check_ctc_mode(struct xe_gt *gt)
{
- u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE);
- u32 freq = 0;
+ /*
+ * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later
+ * platforms. In theory it could be a valid setting for pre-Xe2
+ * platforms, but there's no documentation on how to properly handle
+	 * this case. Reading TIMESTAMP_OVERRIDE, as the driver attempted in
+	 * the past, has been confirmed as incorrect by the hardware architects.
+ *
+ * For now just warn if we ever encounter hardware in the wild that
+ * has this setting and move on as if it hadn't been set.
+ */
+ if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC)
+ xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n");
+}
- /* Assuming gen11+ so assert this assumption is correct */
- xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11);
+int xe_gt_clock_init(struct xe_gt *gt)
+{
+ u32 freq;
+ u32 c0;
- if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
- freq = read_reference_ts_freq(gt);
- } else {
- u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0);
+ if (!IS_SRIOV_VF(gt_to_xe(gt)))
+ check_ctc_mode(gt);
- freq = get_crystal_clock_freq(c0);
+ c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
+ read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base);
- /*
- * Now figure out how the command stream's timestamp
- * register increments from this frequency (it might
- * increment only every few clock cycle).
- */
- freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
- }
+ /*
+ * Now figure out how the command stream's timestamp
+ * register increments from this frequency (it might
+ * increment only every few clock cycle).
+ */
+ freq >>= 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, c0);
gt->info.reference_clock = freq;
return 0;
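
A quick worked example of the derivation above, under the assumption of a 38.4 MHz crystal and a CTC shift parameter of 1 in RPM_CONFIG0 (both values hypothetical): the command streamer timestamp then ticks at 38400000 >> (3 - 1) = 9.6 MHz.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t freq = 38400000;	/* f38_4_mhz */
	uint32_t ctc_shift = 1;		/* hypothetical RPM_CONFIG0 field value */

	freq >>= 3 - ctc_shift;
	printf("reference_clock = %u Hz\n", freq);	/* 9600000 */
	return 0;
}
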
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 8f95d3a5949b..119a55bb7580 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -15,6 +15,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
+#include "xe_gt_idle.h"
#include "xe_gt_sriov_pf_debugfs.h"
#include "xe_gt_sriov_vf_debugfs.h"
#include "xe_gt_stats.h"
@@ -29,6 +30,7 @@
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
#include "xe_sriov.h"
+#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
@@ -89,24 +91,35 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
struct xe_device *xe = gt_to_xe(gt);
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- int err;
+ unsigned int fw_ref;
+ int ret = 0;
xe_pm_runtime_get(xe);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (err) {
- xe_pm_runtime_put(xe);
- return err;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ ret = -ETIMEDOUT;
+ goto fw_put;
}
for_each_hw_engine(hwe, gt, id)
xe_hw_engine_print(hwe, p);
- err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+fw_put:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
- if (err)
- return err;
- return 0;
+ return ret;
+}
+
+static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
+{
+ int ret;
+
+ xe_pm_runtime_get(gt_to_xe(gt));
+ ret = xe_gt_idle_pg_print(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return ret;
}
static int force_reset(struct xe_gt *gt, struct drm_printer *p)
@@ -121,11 +134,9 @@ static int force_reset(struct xe_gt *gt, struct drm_printer *p)
static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
+ xe_gt_reset(gt);
xe_pm_runtime_put(gt_to_xe(gt));
- flush_work(&gt->reset.worker);
-
return 0;
}
@@ -208,6 +219,15 @@ static int workarounds(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+static int tunings(struct xe_gt *gt, struct drm_printer *p)
+{
+ xe_pm_runtime_get(gt_to_xe(gt));
+ xe_tuning_dump(gt, p);
+ xe_pm_runtime_put(gt_to_xe(gt));
+
+ return 0;
+}
+
static int pat(struct xe_gt *gt, struct drm_printer *p)
{
xe_pm_runtime_get(gt_to_xe(gt));
@@ -280,18 +300,20 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+/*
+ * only for GT debugfs files which can be safely used on the VF as well:
+ * - without access to the GT privileged registers
+ * - without access to the PF specific data
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
{"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
{"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
- {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
{"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
{"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
- {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
- {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
+ {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
{"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
{"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
{"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
@@ -301,6 +323,15 @@ static const struct drm_info_list debugfs_list[] = {
{"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
};
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
+ {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
+ {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
+ {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
+ {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+};
+
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -324,10 +355,15 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
- drm_debugfs_create_files(debugfs_list,
- ARRAY_SIZE(debugfs_list),
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
+ if (!IS_SRIOV_VF(xe))
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ root, minor);
+
xe_uc_debugfs_register(&gt->uc, root);
if (IS_SRIOV_PF(xe))
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index ab76973f3e1e..60d9354e7dbf 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -11,9 +11,9 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "xe_device_types.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle.h"
+#include "xe_gt_types.h"
#include "xe_guc_pc.h"
#include "xe_pm.h"
@@ -32,13 +32,18 @@
* Xe's Freq provides a sysfs API for frequency management:
*
* device/tile#/gt#/freq0/<item>_freq *read-only* files:
+ *
* - act_freq: The actual resolved frequency decided by PCODE.
* - cur_freq: The current one requested by GuC PC to the PCODE.
* - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
+ * - rpa_freq: The Render Performance (RP) A level, which is the achievable one.
+ * Calculated by PCODE at runtime based on multiple running conditions.
* - rpe_freq: The Render Performance (RP) E level, which is the efficient one.
+ * Calculated by PCODE at runtime based on multiple running conditions.
* - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one.
*
* device/tile#/gt#/freq0/<item>_freq *read-write* files:
+ *
* - min_freq: Min frequency request.
* - max_freq: Max frequency request.
* If max <= min, then freq_min becomes a fixed frequency request.
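As a usage aside (not part of the patch), a userspace sketch of reading one read-only file and updating a read-write one. The exact sysfs path, the MHz unit and the 1500 value are assumptions; card/tile/GT indices vary per system.

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/class/drm/card0/device/tile0/gt0/freq0";
	char path[256];
	unsigned int freq;
	FILE *f;

	/* Read the actual frequency resolved by PCODE. */
	snprintf(path, sizeof(path), "%s/act_freq", base);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &freq) == 1)
			printf("act_freq: %u MHz\n", freq);
		fclose(f);
	}

	/* Request a new max frequency (needs appropriate permissions). */
	snprintf(path, sizeof(path), "%s/max_freq", base);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "%u\n", 1500);
		fclose(f);
	}
	return 0;
}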
@@ -56,9 +61,10 @@ dev_to_xe(struct device *dev)
return gt_to_xe(kobj_to_gt(dev->kobj.parent));
}
-static ssize_t act_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t act_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -68,11 +74,12 @@ static ssize_t act_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(act_freq);
+static struct kobj_attribute attr_act_freq = __ATTR_RO(act_freq);
-static ssize_t cur_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cur_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -85,11 +92,12 @@ static ssize_t cur_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(cur_freq);
+static struct kobj_attribute attr_cur_freq = __ATTR_RO(cur_freq);
-static ssize_t rp0_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rp0_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -99,11 +107,12 @@ static ssize_t rp0_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rp0_freq);
+static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq);
-static ssize_t rpe_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpe_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
@@ -113,20 +122,37 @@ static ssize_t rpe_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static DEVICE_ATTR_RO(rpe_freq);
+static struct kobj_attribute attr_rpe_freq = __ATTR_RO(rpe_freq);
+
+static ssize_t rpa_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ u32 freq;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ freq = xe_guc_pc_get_rpa_freq(pc);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return sysfs_emit(buf, "%d\n", freq);
+}
+static struct kobj_attribute attr_rpa_freq = __ATTR_RO(rpa_freq);
-static ssize_t rpn_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t rpn_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rpn_freq(pc));
}
-static DEVICE_ATTR_RO(rpn_freq);
+static struct kobj_attribute attr_rpn_freq = __ATTR_RO(rpn_freq);
-static ssize_t min_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t min_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -140,9 +166,10 @@ static ssize_t min_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t min_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -159,11 +186,12 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(min_freq);
+static struct kobj_attribute attr_min_freq = __ATTR_RW(min_freq);
-static ssize_t max_freq_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_freq_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -177,9 +205,10 @@ static ssize_t max_freq_show(struct device *dev,
return sysfs_emit(buf, "%d\n", freq);
}
-static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
- const char *buff, size_t count)
+static ssize_t max_freq_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buff, size_t count)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
u32 freq;
ssize_t ret;
@@ -196,16 +225,17 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
return count;
}
-static DEVICE_ATTR_RW(max_freq);
+static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
static const struct attribute *freq_attrs[] = {
- &dev_attr_act_freq.attr,
- &dev_attr_cur_freq.attr,
- &dev_attr_rp0_freq.attr,
- &dev_attr_rpe_freq.attr,
- &dev_attr_rpn_freq.attr,
- &dev_attr_min_freq.attr,
- &dev_attr_max_freq.attr,
+ &attr_act_freq.attr,
+ &attr_cur_freq.attr,
+ &attr_rp0_freq.attr,
+ &attr_rpa_freq.attr,
+ &attr_rpe_freq.attr,
+ &attr_rpn_freq.attr,
+ &attr_min_freq.attr,
+ &attr_max_freq.attr,
NULL
};
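For context, the DEVICE_ATTR to kobj_attribute conversions above follow the standard kernel pattern sketched below as a hypothetical self-contained module (not part of the patch); "example", "kobj_attr_demo" and demo_kobj are made-up names. The show() callback receives the kobject directly and the attribute is declared with __ATTR_RO().

#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	/* Emit the owning kobject's name, just to have something to show. */
	return sysfs_emit(buf, "%s\n", kobject_name(kobj));
}
static struct kobj_attribute attr_example = __ATTR_RO(example);

static struct kobject *demo_kobj;

static int __init demo_init(void)
{
	/* Creates /sys/kernel/kobj_attr_demo/example */
	demo_kobj = kobject_create_and_add("kobj_attr_demo", kernel_kobj);
	if (!demo_kobj)
		return -ENOMEM;
	return sysfs_create_file(demo_kobj, &attr_example.attr);
}

static void __exit demo_exit(void)
{
	kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");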
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 67aba4140510..c11206410a4d 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -69,6 +69,8 @@ static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
{
u64 delta, overflow_residency, prev_residency;
+ lockdep_assert_held(&gtidle->lock);
+
overflow_residency = BIT_ULL(32);
/*
@@ -98,7 +100,10 @@ static u64 get_residency_ms(struct xe_gt_idle *gtidle, u64 cur_residency)
void xe_gt_idle_enable_pg(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- u32 pg_enable;
+ struct xe_gt_idle *gtidle = &gt->gtidle;
+ struct xe_mmio *mmio = &gt->mmio;
+ u32 vcs_mask, vecs_mask;
+ unsigned int fw_ref;
int i, j;
if (IS_SRIOV_VF(xe))
@@ -110,44 +115,144 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
xe_device_assert_mem_access(gt_to_xe(gt));
- pg_enable = RENDER_POWERGATE_ENABLE | MEDIA_POWERGATE_ENABLE;
+ vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
+
+ if (vcs_mask || vecs_mask)
+ gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE;
+
+ if (!xe_gt_is_media_type(gt))
+ gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
- for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
- if ((gt->info.engine_mask & BIT(i)))
- pg_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
- VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ if (xe->info.platform != XE_DG1) {
+ for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
+ if ((gt->info.engine_mask & BIT(i)))
+ gtidle->powergate_enable |= (VDN_HCP_POWERGATE_ENABLE(j) |
+ VDN_MFXVDENC_POWERGATE_ENABLE(j));
+ }
}
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (xe->info.skip_guc_pc) {
/*
* GuC sets the hysteresis value when GuC PC is enabled
* else set it to 25 (25 * 1.28us)
*/
- xe_mmio_write32(gt, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
- xe_mmio_write32(gt, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
+ xe_mmio_write32(mmio, MEDIA_POWERGATE_IDLE_HYSTERESIS, 25);
+ xe_mmio_write32(mmio, RENDER_POWERGATE_IDLE_HYSTERESIS, 25);
}
- xe_mmio_write32(gt, POWERGATE_ENABLE, pg_enable);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
+ xe_mmio_write32(mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
void xe_gt_idle_disable_pg(struct xe_gt *gt)
{
+ struct xe_gt_idle *gtidle = &gt->gtidle;
+ unsigned int fw_ref;
+
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
xe_device_assert_mem_access(gt_to_xe(gt));
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ gtidle->powergate_enable = 0;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ xe_mmio_write32(&gt->mmio, POWERGATE_ENABLE, gtidle->powergate_enable);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+}
- xe_mmio_write32(gt, POWERGATE_ENABLE, 0);
+/**
+ * xe_gt_idle_pg_print - Xe powergating info
+ * @gt: GT object
+ * @p: drm_printer.
+ *
+ * This function prints the powergating information.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_gt_idle *gtidle = &gt->gtidle;
+ struct xe_device *xe = gt_to_xe(gt);
+ enum xe_gt_idle_state state;
+ u32 pg_enabled, pg_status = 0;
+ u32 vcs_mask, vecs_mask;
+ unsigned int fw_ref;
+ int n;
+ /*
+ * Media Slices
+ *
+ * Slice 0: VCS0, VCS1, VECS0
+ * Slice 1: VCS2, VCS3, VECS1
+ * Slice 2: VCS4, VCS5, VECS2
+ * Slice 3: VCS6, VCS7, VECS3
+ */
+ static const struct {
+ u64 engines;
+ u32 status_bit;
+ } media_slices[] = {
+ {(BIT(XE_HW_ENGINE_VCS0) | BIT(XE_HW_ENGINE_VCS1) |
+ BIT(XE_HW_ENGINE_VECS0)), MEDIA_SLICE0_AWAKE_STATUS},
+
+ {(BIT(XE_HW_ENGINE_VCS2) | BIT(XE_HW_ENGINE_VCS3) |
+ BIT(XE_HW_ENGINE_VECS1)), MEDIA_SLICE1_AWAKE_STATUS},
+
+ {(BIT(XE_HW_ENGINE_VCS4) | BIT(XE_HW_ENGINE_VCS5) |
+ BIT(XE_HW_ENGINE_VECS2)), MEDIA_SLICE2_AWAKE_STATUS},
+
+ {(BIT(XE_HW_ENGINE_VCS6) | BIT(XE_HW_ENGINE_VCS7) |
+ BIT(XE_HW_ENGINE_VECS3)), MEDIA_SLICE3_AWAKE_STATUS},
+ };
+
+ if (xe->info.platform == XE_PVC) {
+ drm_printf(p, "Power Gating not supported\n");
+ return 0;
+ }
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FW_GT));
+ state = gtidle->idle_status(gtidle_to_pc(gtidle));
+ pg_enabled = gtidle->powergate_enable;
+
+ /* Do not wake the GT to read powergating status */
+ if (state != GT_IDLE_C6) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ pg_enabled = xe_mmio_read32(&gt->mmio, POWERGATE_ENABLE);
+ pg_status = xe_mmio_read32(&gt->mmio, POWERGATE_DOMAIN_STATUS);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+
+ if (gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK) {
+ drm_printf(p, "Render Power Gating Enabled: %s\n",
+ str_yes_no(pg_enabled & RENDER_POWERGATE_ENABLE));
+
+ drm_printf(p, "Render Power Gate Status: %s\n",
+ str_up_down(pg_status & RENDER_AWAKE_STATUS));
+ }
+
+ vcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ vecs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
+
+ /* Print media CPG status only if media is present */
+ if (vcs_mask || vecs_mask) {
+ drm_printf(p, "Media Power Gating Enabled: %s\n",
+ str_yes_no(pg_enabled & MEDIA_POWERGATE_ENABLE));
+
+ for (n = 0; n < ARRAY_SIZE(media_slices); n++)
+ if (gt->info.engine_mask & media_slices[n].engines)
+ drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n,
+ str_up_down(pg_status & media_slices[n].status_bit));
+ }
+ return 0;
}
-static ssize_t name_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t name_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
ssize_t ret;
@@ -158,11 +263,12 @@ static ssize_t name_show(struct device *dev,
return ret;
}
-static DEVICE_ATTR_RO(name);
+static struct kobj_attribute name_attr = __ATTR_RO(name);
-static ssize_t idle_status_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+static ssize_t idle_status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
enum xe_gt_idle_state state;
@@ -173,27 +279,42 @@ static ssize_t idle_status_show(struct device *dev,
return sysfs_emit(buff, "%s\n", gt_idle_state_to_string(state));
}
-static DEVICE_ATTR_RO(idle_status);
+static struct kobj_attribute idle_status_attr = __ATTR_RO(idle_status);
+
+u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle)
+{
+ struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
+ u64 residency;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&gtidle->lock, flags);
+ residency = get_residency_ms(gtidle, gtidle->idle_residency(pc));
+ raw_spin_unlock_irqrestore(&gtidle->lock, flags);
+
+ return residency;
+}
-static ssize_t idle_residency_ms_show(struct device *dev,
- struct device_attribute *attr, char *buff)
+
+static ssize_t idle_residency_ms_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt_idle *gtidle = dev_to_gtidle(dev);
struct xe_guc_pc *pc = gtidle_to_pc(gtidle);
u64 residency;
xe_pm_runtime_get(pc_to_xe(pc));
- residency = gtidle->idle_residency(pc);
+ residency = xe_gt_idle_residency_msec(gtidle);
xe_pm_runtime_put(pc_to_xe(pc));
- return sysfs_emit(buff, "%llu\n", get_residency_ms(gtidle, residency));
+ return sysfs_emit(buff, "%llu\n", residency);
}
-static DEVICE_ATTR_RO(idle_residency_ms);
+static struct kobj_attribute idle_residency_attr = __ATTR_RO(idle_residency_ms);
static const struct attribute *gt_idle_attrs[] = {
- &dev_attr_name.attr,
- &dev_attr_idle_status.attr,
- &dev_attr_idle_residency_ms.attr,
+ &name_attr.attr,
+ &idle_status_attr.attr,
+ &idle_residency_attr.attr,
NULL,
};
@@ -201,13 +322,14 @@ static void gt_idle_fini(void *arg)
{
struct kobject *kobj = arg;
struct xe_gt *gt = kobj_to_gt(kobj->parent);
+ unsigned int fw_ref;
xe_gt_idle_disable_pg(gt);
if (gt_to_xe(gt)->info.skip_guc_pc) {
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
xe_gt_idle_disable_c6(gt);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
sysfs_remove_files(kobj, gt_idle_attrs);
@@ -228,6 +350,8 @@ int xe_gt_idle_init(struct xe_gt_idle *gtidle)
if (!kobj)
return -ENOMEM;
+ raw_spin_lock_init(&gtidle->lock);
+
if (xe_gt_is_media_type(gt)) {
snprintf(gtidle->name, sizeof(gtidle->name), "gt%d-mc", gt->info.id);
gtidle->idle_residency = xe_guc_pc_mc6_residency;
@@ -260,9 +384,9 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
return;
/* Units of 1280 ns for a total of 5s */
- xe_mmio_write32(gt, RC_IDLE_HYSTERSIS, 0x3B9ACA);
+ xe_mmio_write32(&gt->mmio, RC_IDLE_HYSTERSIS, 0x3B9ACA);
/* Enable RC6 */
- xe_mmio_write32(gt, RC_CONTROL,
+ xe_mmio_write32(&gt->mmio, RC_CONTROL,
RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
}
@@ -274,6 +398,6 @@ void xe_gt_idle_disable_c6(struct xe_gt *gt)
if (IS_SRIOV_VF(gt_to_xe(gt)))
return;
- xe_mmio_write32(gt, RC_CONTROL, 0);
- xe_mmio_write32(gt, RC_STATE, 0);
+ xe_mmio_write32(&gt->mmio, RC_CONTROL, 0);
+ xe_mmio_write32(&gt->mmio, RC_STATE, 0);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 554447b5d46d..591a01e181bc 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -8,6 +8,7 @@
#include "xe_gt_idle_types.h"
+struct drm_printer;
struct xe_gt;
int xe_gt_idle_init(struct xe_gt_idle *gtidle);
@@ -15,5 +16,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt);
void xe_gt_idle_disable_c6(struct xe_gt *gt);
void xe_gt_idle_enable_pg(struct xe_gt *gt);
void xe_gt_idle_disable_pg(struct xe_gt *gt);
+int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p);
+u64 xe_gt_idle_residency_msec(struct xe_gt_idle *gtidle);
#endif /* _XE_GT_IDLE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_idle_types.h b/drivers/gpu/drm/xe/xe_gt_idle_types.h
index f99b447534f3..a3667c567f8a 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle_types.h
@@ -6,6 +6,7 @@
#ifndef _XE_GT_IDLE_SYSFS_TYPES_H_
#define _XE_GT_IDLE_SYSFS_TYPES_H_
+#include <linux/spinlock.h>
#include <linux/types.h>
struct xe_guc_pc;
@@ -23,12 +24,16 @@ enum xe_gt_idle_state {
struct xe_gt_idle {
/** @name: name */
char name[16];
+ /** @powergate_enable: copy of powergate enable bits */
+ u32 powergate_enable;
/** @residency_multiplier: residency multiplier in ns */
u32 residency_multiplier;
/** @cur_residency: raw driver copy of idle residency */
u64 cur_residency;
/** @prev_residency: previous residency counter */
u64 prev_residency;
+ /** @lock: Lock protecting idle residency counters */
+ raw_spinlock_t lock;
/** @idle_status: get the current idle state */
enum xe_gt_idle_state (*idle_status)(struct xe_guc_pc *pc);
/** @idle_residency: get idle residency counter */
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index c834f64b0178..d4d9730f0d2c 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -237,13 +237,26 @@ static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
{},
};
+static const struct xe_mmio_range xe3lpm_instance0_steering_table[] = {
+ { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */
+ { 0x384900, 0x384AFF }, /* GAM */
+ { 0x389560, 0x3895FF }, /* MEDIAINF */
+ { 0x38B600, 0x38B8FF }, /* L3BANK */
+ { 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */
+ { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, GAM */
+ { 0x393C00, 0x393C7F }, /* MEDIAINF */
+ {},
+};
+
static void init_steering_l3bank(struct xe_gt *gt)
{
+ struct xe_mmio *mmio = &gt->mmio;
+
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
- xe_mmio_read32(gt, XEHP_FUSE4));
+ xe_mmio_read32(mmio, XEHP_FUSE4));
/*
* Group selects mslice, instance selects bank within mslice.
@@ -254,7 +267,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
bank_mask & BIT(0) ? 0 : 2;
} else if (gt_to_xe(gt)->info.platform == XE_DG2) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank = __ffs(mslice_mask) * 8;
/*
@@ -266,7 +279,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
gt->steering[L3BANK].instance_target = bank & 0x3;
} else {
u32 fuse = REG_FIELD_GET(L3BANK_MASK,
- ~xe_mmio_read32(gt, MIRROR_FUSE3));
+ ~xe_mmio_read32(mmio, MIRROR_FUSE3));
gt->steering[L3BANK].group_target = 0; /* unused */
gt->steering[L3BANK].instance_target = __ffs(fuse);
@@ -276,7 +289,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
static void init_steering_mslice(struct xe_gt *gt)
{
u32 mask = REG_FIELD_GET(MEML3_EN_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
/*
* mslice registers are valid (not terminated) if either the meml3
@@ -328,7 +341,14 @@ static unsigned int dss_per_group(struct xe_gt *gt)
return DIV_ROUND_UP(max_subslices, max_slices);
fallback:
- xe_gt_dbg(gt, "GuC hwconfig cannot provide dss/slice; using typical fallback values\n");
+ /*
+ * Some older platforms don't have tables or don't have complete tables.
+ * Newer platforms should always have the required info.
+ */
+ if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 2000 &&
+ !gt_to_xe(gt)->info.force_execlist)
+ xe_gt_err(gt, "Slice/Subslice counts missing from hwconfig table; using typical fallback values\n");
+
if (gt_to_xe(gt)->info.platform == XE_PVC)
return 8;
else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
@@ -352,6 +372,19 @@ void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group,
*instance = dss % gt->steering_dss_per_grp;
}
+/**
+ * xe_gt_mcr_steering_info_to_dss_id - Get DSS ID from group/instance steering
+ * @gt: GT structure
+ * @group: steering group ID
+ * @instance: steering instance ID
+ *
+ * Return: the converted DSS id.
+ */
+u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance)
+{
+ return group * dss_per_group(gt) + instance;
+}
+
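A small sanity-check sketch using the new helper together with the existing xe_gt_mcr_get_dss_steering() (hypothetical debugging aid, not part of the patch):

static void check_dss_round_trip(struct xe_gt *gt, unsigned int dss)
{
	u16 group, instance;

	/* dss -> (group, instance) -> dss should be the identity. */
	xe_gt_mcr_get_dss_steering(gt, dss, &group, &instance);
	xe_gt_assert(gt, xe_gt_mcr_steering_info_to_dss_id(gt, group, instance) == dss);
}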
static void init_steering_dss(struct xe_gt *gt)
{
gt->steering_dss_per_grp = dss_per_group(gt);
@@ -380,7 +413,7 @@ static void init_steering_oaddrm(struct xe_gt *gt)
static void init_steering_sqidi_psmi(struct xe_gt *gt)
{
u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK,
- xe_mmio_read32(gt, MIRROR_FUSE3));
+ xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
u32 select = __ffs(mask);
gt->steering[SQIDI_PSMI].group_target = select >> 1;
@@ -439,7 +472,10 @@ void xe_gt_mcr_init(struct xe_gt *gt)
if (gt->info.type == XE_GT_TYPE_MEDIA) {
drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);
- if (MEDIA_VERx100(xe) >= 1301) {
+ if (MEDIA_VER(xe) >= 30) {
+ gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
+ gt->steering[INSTANCE0].ranges = xe3lpm_instance0_steering_table;
+ } else if (MEDIA_VERx100(xe) >= 1301) {
gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
} else {
@@ -494,8 +530,8 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
- xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val);
- xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(&gt->mmio, MCFG_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(&gt->mmio, SF_MCR_SELECTOR, steer_val);
/*
* For GAM registers, all reads should be directed to instance 1
* (unicast reads against other instances are not allowed),
@@ -521,9 +557,9 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
* Returns true if the caller should steer to the @group/@instance values
* returned. Returns false if the caller need not perform any steering
*/
-static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
- struct xe_reg_mcr reg_mcr,
- u8 *group, u8 *instance)
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
const struct xe_mmio_range *implicit_ranges;
@@ -533,7 +569,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
continue;
for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
- if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) {
+ if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) {
*group = gt->steering[type].group_target;
*instance = gt->steering[type].instance_target;
return true;
@@ -544,7 +580,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
if (implicit_ranges)
for (int i = 0; implicit_ranges[i].end > 0; i++)
- if (xe_mmio_in_range(gt, &implicit_ranges[i], reg))
+ if (xe_mmio_in_range(&gt->mmio, &implicit_ranges[i], reg))
return false;
/*
@@ -579,7 +615,7 @@ static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
* when a read to the relevant register returns 1.
*/
if (GRAPHICS_VERx100(xe) >= 1270)
- ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
+ ret = xe_mmio_wait32(&gt->mmio, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
true);
drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
@@ -589,7 +625,7 @@ static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
{
/* Release hardware semaphore - this is done by writing 1 to the register */
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
- xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1);
+ xe_mmio_write32(&gt->mmio, STEER_SEMAPHORE, 0x1);
spin_unlock(&gt->mcr_lock);
}
@@ -603,6 +639,7 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
u8 rw_flag, int group, int instance, u32 value)
{
const struct xe_reg reg = to_xe_reg(reg_mcr);
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_reg steer_reg;
u32 steer_val, val = 0;
@@ -635,12 +672,12 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
if (rw_flag == MCR_OP_READ)
steer_val |= MCR_MULTICAST;
- xe_mmio_write32(gt, steer_reg, steer_val);
+ xe_mmio_write32(mmio, steer_reg, steer_val);
if (rw_flag == MCR_OP_READ)
- val = xe_mmio_read32(gt, reg);
+ val = xe_mmio_read32(mmio, reg);
else
- xe_mmio_write32(gt, reg, value);
+ xe_mmio_write32(mmio, reg, value);
/*
* If we turned off the multicast bit (during a write) we're required
@@ -649,7 +686,7 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
* operation.
*/
if (rw_flag == MCR_OP_WRITE)
- xe_mmio_write32(gt, steer_reg, MCR_MULTICAST);
+ xe_mmio_write32(mmio, steer_reg, MCR_MULTICAST);
return val;
}
@@ -684,7 +721,7 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
group, instance, 0);
mcr_unlock(gt);
} else {
- val = xe_mmio_read32(gt, reg);
+ val = xe_mmio_read32(&gt->mmio, reg);
}
return val;
@@ -757,7 +794,7 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
* to touch the steering register.
*/
mcr_lock(gt);
- xe_mmio_write32(gt, reg, value);
+ xe_mmio_write32(&gt->mmio, reg, value);
mcr_unlock(gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index 8d119a0d5493..bc06520befab 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -26,8 +26,13 @@ void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr mcr_reg,
u32 value);
+bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
+ struct xe_reg_mcr reg_mcr,
+ u8 *group, u8 *instance);
+
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
+u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
/*
* Loop over each DSS and determine the group and instance IDs that
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 79c426dc2505..10622ca471a2 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -10,15 +10,16 @@
#include <drm/drm_exec.h>
#include <drm/drm_managed.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_migrate.h"
+#include "xe_svm.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
@@ -125,18 +126,22 @@ static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
return 0;
}
-static int handle_vma_pagefault(struct xe_tile *tile, struct pagefault *pf,
- struct xe_vma *vma)
+static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
+ bool atomic)
{
struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_tile *tile = gt_to_tile(gt);
struct drm_exec exec;
struct dma_fence *fence;
ktime_t end = 0;
int err;
- bool atomic;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
trace_xe_vma_pagefault(vma);
- atomic = access_is_atomic(pf->access_type);
/* Check if VMA is valid */
if (vma_is_valid(tile, vma) && !atomic)
@@ -203,10 +208,10 @@ static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
struct xe_device *xe = gt_to_xe(gt);
- struct xe_tile *tile = gt_to_tile(gt);
struct xe_vm *vm;
struct xe_vma *vma = NULL;
int err;
+ bool atomic;
/* SW isn't expected to handle TRTT faults */
if (pf->trva_fault)
@@ -232,7 +237,13 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
goto unlock_vm;
}
- err = handle_vma_pagefault(tile, pf, vma);
+ atomic = access_is_atomic(pf->access_type);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ err = xe_svm_handle_pagefault(vm, vma, gt,
+ pf->page_addr, atomic);
+ else
+ err = handle_vma_pagefault(gt, vma, atomic);
unlock_vm:
if (!err)
@@ -264,12 +275,13 @@ static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
"\tFaultType: %d\n"
"\tAccessType: %d\n"
"\tFaultLevel: %d\n"
- "\tEngineClass: %d\n"
+ "\tEngineClass: %d %s\n"
"\tEngineInstance: %d\n",
pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
lower_32_bits(pf->page_addr),
pf->fault_type, pf->access_type, pf->fault_level,
- pf->engine_class, pf->engine_instance);
+ pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
+ pf->engine_instance);
}
#define PF_MSG_LEN_DW 4
@@ -423,9 +435,16 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
XE_MAX_EU_FUSE_BITS) * num_dss;
- /* user can issue separate page faults per EU and per CS */
+ /*
+ * The user can issue separate page faults per EU and per CS.
+ *
+ * XXX: A multiplier is required as compute UMDs are getting PF queue
+ * errors without it. Follow up on why this multiplier is required.
+ */
+#define PF_MULTIPLIER 8
pf_queue->num_dw =
- (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
+#undef PF_MULTIPLIER
pf_queue->gt = gt;
pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
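A back-of-the-envelope illustration of the queue sizing above (not part of the patch); the EU count and XE_NUM_HW_ENGINES value are made-up placeholders, while PF_MSG_LEN_DW and PF_MULTIPLIER match the driver.

#include <stdio.h>

#define PF_MSG_LEN_DW	4	/* dwords per G2H page-fault message */
#define PF_MULTIPLIER	8	/* headroom multiplier added by this patch */

int main(void)
{
	unsigned int num_eus = 512;		/* assumed EU count from fuses */
	unsigned int num_hw_engines = 40;	/* assumed XE_NUM_HW_ENGINES */
	unsigned int num_dw;

	num_dw = (num_eus + num_hw_engines) * PF_MSG_LEN_DW * PF_MULTIPLIER;
	printf("pf_queue->num_dw = %u (%zu bytes)\n",
	       num_dw, num_dw * sizeof(unsigned int));
	return 0;
}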
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index d6228baaff1e..11da0228cea7 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -8,7 +8,7 @@
#include <drm/drm_print.h>
-#include "xe_device_types.h"
+#include "xe_gt_types.h"
#define xe_gt_printk(_gt, _level, _fmt, ...) \
drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
@@ -60,6 +60,21 @@ static inline void __xe_gt_printfn_info(struct drm_printer *p, struct va_format
xe_gt_info(gt, "%pV", vaf);
}
+static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_gt *gt = p->arg;
+ struct drm_printer dbg;
+
+ /*
+ * The original xe_gt_dbg() callsite annotations are useless here,
+ * redirect to the tweaked drm_dbg_printer() instead.
+ */
+ dbg = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, NULL);
+ dbg.origin = p->origin;
+
+ drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf);
+}
+
/**
* xe_gt_err_printer - Construct a &drm_printer that outputs to xe_gt_err()
* @gt: the &xe_gt pointer to use in xe_gt_err()
@@ -90,4 +105,20 @@ static inline struct drm_printer xe_gt_info_printer(struct xe_gt *gt)
return p;
}
+/**
+ * xe_gt_dbg_printer - Construct a &drm_printer that outputs like xe_gt_dbg()
+ * @gt: the &xe_gt pointer to use in xe_gt_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_gt_dbg_printer(struct xe_gt *gt)
+{
+ struct drm_printer p = {
+ .printfn = __xe_gt_printfn_dbg,
+ .arg = gt,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
#endif
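Usage mirrors the existing xe_gt_err_printer()/xe_gt_info_printer() helpers; a hypothetical caller (not part of the patch) might look like:

#include "xe_gt_printk.h"

static void dump_state_for_debug(struct xe_gt *gt)
{
	struct drm_printer p = xe_gt_dbg_printer(gt);

	/* Each line printed here becomes a GT-prefixed drm debug message. */
	drm_printf(&p, "reference_clock: %u\n", gt->info.reference_clock);
}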
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index 905f409db74b..c08efca6420e 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -5,14 +5,21 @@
#include <drm/drm_managed.h>
+#include "regs/xe_guc_regs.h"
#include "regs/xe_regs.h"
+#include "xe_gt.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_service.h"
+#include "xe_gt_sriov_printk.h"
#include "xe_mmio.h"
+#include "xe_pm.h"
+
+static void pf_worker_restart_func(struct work_struct *w);
/*
* VF's metadata is maintained in the flexible array where:
@@ -38,6 +45,11 @@ static int pf_alloc_metadata(struct xe_gt *gt)
return 0;
}
+static void pf_init_workers(struct xe_gt *gt)
+{
+ INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func);
+}
+
/**
* xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF.
* @gt: the &xe_gt to initialize
@@ -62,9 +74,30 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
+ pf_init_workers(gt);
+
return 0;
}
+/**
+ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Late one-time initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ int err;
+
+ err = xe_gt_sriov_pf_config_init(gt);
+ if (err)
+ return err;
+
+ return xe_gt_sriov_pf_migration_init(gt);
+}
+
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) == 1200;
@@ -72,7 +105,7 @@ static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
static void pf_enable_ggtt_guest_update(struct xe_gt *gt)
{
- xe_mmio_write32(gt, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
+ xe_mmio_write32(&gt->mmio, VIRTUAL_CTRL_REG, GUEST_GTT_UPDATE_EN);
}
/**
@@ -89,14 +122,92 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
xe_gt_sriov_pf_service_update(gt);
}
+static u32 pf_get_vf_regs_stride(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
+}
+
+static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
+{
+ struct xe_reg pf_reg = vf_reg;
+
+ pf_reg.vf = 0;
+ pf_reg.addr += stride * vfid;
+
+ return pf_reg;
+}
+
+static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
+ struct xe_reg scratch;
+ int n, count;
+
+ if (xe_gt_is_media_type(gt)) {
+ count = MED_VF_SW_FLAG_COUNT;
+ for (n = 0; n < count; n++) {
+ scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
+ xe_mmio_write32(&gt->mmio, scratch, 0);
+ }
+ } else {
+ count = VF_SW_FLAG_COUNT;
+ for (n = 0; n < count; n++) {
+ scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
+ xe_mmio_write32(&gt->mmio, scratch, 0);
+ }
+ }
+}
+
/**
- * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
+ * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF.
* @gt: the &xe_gt
+ * @vfid: the VF identifier
*
* This function can only be called on PF.
*/
-void xe_gt_sriov_pf_restart(struct xe_gt *gt)
+void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ pf_clear_vf_scratch_regs(gt, vfid);
+}
+
+static void pf_restart(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
xe_gt_sriov_pf_config_restart(gt);
xe_gt_sriov_pf_control_restart(gt);
+ xe_pm_runtime_put(xe);
+
+ xe_gt_sriov_dbg(gt, "restart completed\n");
+}
+
+static void pf_worker_restart_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart);
+
+ pf_restart(gt);
+}
+
+static void pf_queue_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
+ xe_gt_sriov_dbg(gt, "restart already in queue!\n");
+}
+
+/**
+ * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_restart(struct xe_gt *gt)
+{
+ pf_queue_restart(gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index f0cb726a6919..f474509411c0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,7 +10,9 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
+void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
#else
static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
@@ -18,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return 0;
+}
+
static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 8250ef71e685..2420a548cacc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -20,6 +20,7 @@
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
#include "xe_guc_fwif.h"
@@ -34,6 +35,8 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"
+#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
/*
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
@@ -69,48 +72,27 @@ static int pf_send_vf_cfg_reset(struct xe_gt *gt, u32 vfid)
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
*/
-static int pf_send_vf_cfg_klvs(struct xe_gt *gt, u32 vfid, const u32 *klvs, u32 num_dwords)
+static int pf_send_vf_buf_klvs(struct xe_gt *gt, u32 vfid, struct xe_guc_buf buf, u32 num_dwords)
{
- const u32 bytes = num_dwords * sizeof(u32);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
struct xe_guc *guc = &gt->uc.guc;
- struct xe_bo *bo;
- int ret;
-
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(bytes, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
- ret = guc_action_update_vf_cfg(guc, vfid, xe_bo_ggtt_addr(bo), num_dwords);
-
- xe_bo_unpin_map_no_vm(bo);
-
- return ret;
+ return guc_action_update_vf_cfg(guc, vfid, xe_guc_buf_flush(buf), num_dwords);
}
/*
* Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
* negative error code on failure.
*/
-static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
- const u32 *klvs, u32 num_dwords)
+static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ struct xe_guc_buf buf, u32 num_dwords)
{
int ret;
- xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
-
- ret = pf_send_vf_cfg_klvs(gt, vfid, klvs, num_dwords);
+ ret = pf_send_vf_buf_klvs(gt, vfid, buf, num_dwords);
if (ret != num_klvs) {
int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
struct drm_printer p = xe_gt_info_printer(gt);
char name[8];
@@ -123,13 +105,35 @@ static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
struct drm_printer p = xe_gt_info_printer(gt);
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
+ char name[8];
+ xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ num_klvs, str_plural(num_klvs));
xe_guc_klv_print(klvs, num_dwords, &p);
}
return 0;
}
+/*
+ * Return: 0 on success, -ENOBUFS if no free buffer for the indirect data,
+ * negative error code on failure.
+ */
+static int pf_push_vf_cfg_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ return pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
+}
+
static int pf_push_vf_cfg_u32(struct xe_gt *gt, unsigned int vfid, u16 key, u32 value)
{
u32 klv[] = {
@@ -205,6 +209,11 @@ static int pf_push_vf_cfg_preempt_timeout(struct xe_gt *gt, unsigned int vfid, u
return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY, *preempt_timeout);
}
+static int pf_push_vf_cfg_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ return pf_push_vf_cfg_u32(gt, vfid, GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY, priority);
+}
+
static int pf_push_vf_cfg_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
{
return pf_push_vf_cfg_u64(gt, vfid, GUC_KLV_VF_CFG_LMEM_SIZE_KEY, size);
@@ -229,14 +238,16 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i
}
/* Return: number of configuration dwords written */
-static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
+static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
if (xe_ggtt_node_allocated(config->ggtt_region)) {
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
- cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
- cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
+ if (details) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START);
+ cfg[n++] = lower_32_bits(config->ggtt_region->base.start);
+ cfg[n++] = upper_32_bits(config->ggtt_region->base.start);
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE);
cfg[n++] = lower_32_bits(config->ggtt_region->base.size);
@@ -247,20 +258,24 @@ static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config)
}
/* Return: number of configuration dwords written */
-static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
+static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details)
{
u32 n = 0;
- n += encode_config_ggtt(cfg, config);
+ n += encode_config_ggtt(cfg, config, details);
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
- cfg[n++] = config->begin_ctx;
+ if (details && config->num_ctxs) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_CONTEXT_ID);
+ cfg[n++] = config->begin_ctx;
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_CONTEXTS);
cfg[n++] = config->num_ctxs;
- cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
- cfg[n++] = config->begin_db;
+ if (details && config->num_dbs) {
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_BEGIN_DOORBELL_ID);
+ cfg[n++] = config->begin_db;
+ }
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_NUM_DOORBELLS);
cfg[n++] = config->num_dbs;
@@ -291,17 +306,18 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
- u32 max_cfg_dwords = SZ_4K / sizeof(u32);
+ u32 max_cfg_dwords = xe_guc_buf_cache_dwords(&gt->uc.guc.buf);
+ CLASS(xe_guc_buf, buf)(&gt->uc.guc.buf, max_cfg_dwords);
u32 num_dwords;
int num_klvs;
u32 *cfg;
int err;
- cfg = kcalloc(max_cfg_dwords, sizeof(u32), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
- num_dwords = encode_config(cfg, config);
+ cfg = xe_guc_buf_cpu_ptr(buf);
+ num_dwords = encode_config(cfg, config, true);
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
if (xe_gt_is_media_type(gt)) {
@@ -309,20 +325,39 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid)
struct xe_gt_sriov_config *other = pf_pick_vf_config(primary, vfid);
/* media-GT will never include a GGTT config */
- xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config));
+ xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true));
/* the GGTT config must be taken from the primary-GT instead */
- num_dwords += encode_config_ggtt(cfg + num_dwords, other);
+ num_dwords += encode_config_ggtt(cfg + num_dwords, other, true);
}
xe_gt_assert(gt, num_dwords <= max_cfg_dwords);
num_klvs = xe_guc_klv_count(cfg, num_dwords);
- err = pf_push_vf_cfg_klvs(gt, vfid, num_klvs, cfg, num_dwords);
+ err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords);
+
+ return err;
+}
+
+static int pf_push_vf_cfg(struct xe_gt *gt, unsigned int vfid, bool reset)
+{
+ int err = 0;
+
+ xe_gt_assert(gt, vfid);
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (reset)
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err)
+ err = pf_push_full_vf_config(gt, vfid);
- kfree(cfg);
return err;
}
+static int pf_refresh_vf_cfg(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_push_vf_cfg(gt, vfid, true);
+}
+
static u64 pf_get_ggtt_alignment(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -387,6 +422,8 @@ static void pf_release_ggtt(struct xe_tile *tile, struct xe_ggtt_node *node)
* the xe_ggtt_clear() called by below xe_ggtt_remove_node().
*/
xe_ggtt_node_remove(node, false);
+ } else {
+ xe_ggtt_node_fini(node);
}
}
@@ -417,6 +454,10 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
return err;
pf_release_vf_config_ggtt(gt, config);
+
+ err = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(err))
+ return err;
}
xe_gt_assert(gt, !xe_ggtt_node_allocated(config->ggtt_region));
@@ -442,7 +483,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size)
config->ggtt_region = node;
return 0;
err:
- xe_ggtt_node_fini(node);
+ pf_release_ggtt(tile, node);
return err;
}
@@ -742,6 +783,10 @@ static int pf_provision_vf_ctxs(struct xe_gt *gt, unsigned int vfid, u32 num_ctx
return ret;
pf_release_config_ctxs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_ctxs)
@@ -1039,6 +1084,10 @@ static int pf_provision_vf_dbs(struct xe_gt *gt, unsigned int vfid, u32 num_dbs)
return ret;
pf_release_config_dbs(gt, config);
+
+ ret = pf_refresh_vf_cfg(gt, vfid);
+ if (unlikely(ret))
+ return ret;
}
if (!num_dbs)
@@ -1287,7 +1336,7 @@ static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid)
struct xe_tile *tile;
unsigned int tid;
- xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, xe_device_has_lmtt(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
for_each_tile(tile, xe, tid) {
@@ -1308,7 +1357,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid)
unsigned int tid;
int err;
- xe_assert(xe, IS_DGFX(xe));
+ xe_assert(xe, xe_device_has_lmtt(xe));
xe_assert(xe, IS_SRIOV_PF(xe));
total = 0;
@@ -1385,7 +1434,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
if (unlikely(err))
return err;
- pf_reset_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_reset_vf_lmtt(xe, vfid);
pf_release_vf_config_lmem(gt, config);
}
xe_gt_assert(gt, !config->lmem_obj);
@@ -1394,20 +1444,30 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED);
+ bo = xe_bo_create_locked(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
+ err = xe_bo_pin(bo);
+ xe_bo_unlock(bo);
+ if (unlikely(err)) {
+ xe_bo_put(bo);
+ return err;
+ }
+
config->lmem_obj = bo;
- err = pf_update_vf_lmtt(xe, vfid);
- if (unlikely(err))
- goto release;
+ if (xe_device_has_lmtt(xe)) {
+ err = pf_update_vf_lmtt(xe, vfid);
+ if (unlikely(err))
+ goto release;
+ }
err = pf_push_vf_cfg_lmem(gt, vfid, bo->size);
if (unlikely(err))
@@ -1418,7 +1478,8 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
reset_lmtt:
- pf_reset_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_reset_vf_lmtt(xe, vfid);
release:
pf_release_vf_config_lmem(gt, config);
return err;
@@ -1511,7 +1572,7 @@ static u64 pf_query_free_lmem(struct xe_gt *gt)
{
struct xe_tile *tile = gt->tile;
- return xe_ttm_vram_get_avail(&tile->mem.vram_mgr->manager);
+ return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
}
static u64 pf_query_max_lmem(struct xe_gt *gt)
@@ -1530,8 +1591,6 @@ static u64 pf_query_max_lmem(struct xe_gt *gt)
#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
#define MAX_FAIR_LMEM SZ_128M /* XXX: make it small for the driver bringup */
-#else
-#define MAX_FAIR_LMEM SZ_2G /* XXX: known issue with allocating BO over 2GiB */
#endif
static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
@@ -1757,6 +1816,77 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
return preempt_timeout;
}
+static const char *sched_priority_unit(u32 priority)
+{
+ return priority == GUC_SCHED_PRIORITY_LOW ? "(low)" :
+ priority == GUC_SCHED_PRIORITY_NORMAL ? "(normal)" :
+ priority == GUC_SCHED_PRIORITY_HIGH ? "(high)" :
+ "(?)";
+}
+
+static int pf_provision_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ int err;
+
+ err = pf_push_vf_cfg_sched_priority(gt, vfid, priority);
+ if (unlikely(err))
+ return err;
+
+ config->sched_priority = priority;
+ return 0;
+}
+
+static int pf_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->sched_priority;
+}
+
+/**
+ * xe_gt_sriov_pf_config_set_sched_priority() - Configure scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @priority: requested scheduling priority
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority)
+{
+ int err;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_provision_sched_priority(gt, vfid, priority);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_config_set_u32_done(gt, vfid, priority,
+ xe_gt_sriov_pf_config_get_sched_priority(gt, vfid),
+ "scheduling priority", sched_priority_unit, err);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_sched_priority - Get VF's scheduling priority.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: VF's (or PF's) scheduling priority.
+ */
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid)
+{
+ u32 priority;
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ priority = pf_get_sched_priority(gt, vfid);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return priority;
+}
+
static void pf_reset_config_sched(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
@@ -1863,7 +1993,8 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
pf_release_vf_config_lmem(gt, config);
- pf_update_vf_lmtt(xe, vfid);
+ if (xe_device_has_lmtt(xe))
+ pf_update_vf_lmtt(xe, vfid);
}
}
pf_release_config_ctxs(gt, config);
@@ -2001,10 +2132,7 @@ int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh
xe_gt_assert(gt, vfid);
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- if (refresh)
- err = pf_send_vf_cfg_reset(gt, vfid);
- if (!err)
- err = pf_push_full_vf_config(gt, vfid);
+ err = pf_push_vf_cfg(gt, vfid, refresh);
mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
if (unlikely(err)) {
@@ -2036,13 +2164,13 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_any = valid_any || (valid_ggtt && is_primary);
if (IS_DGFX(xe)) {
- bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
+ bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
valid_all = valid_all && valid_lmem;
}
- return valid_all ? 1 : valid_any ? -ENOKEY : -ENODATA;
+ return valid_all ? 0 : valid_any ? -ENOKEY : -ENODATA;
}
/**
@@ -2069,6 +2197,203 @@ bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid)
}
/**
+ * xe_gt_sriov_pf_config_save - Save a VF provisioning config as binary blob.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @buf: the buffer to save a config to (or NULL if query the buf size)
+ * @size: the size of the buffer (or 0 if query the buf size)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: minimum size of the buffer or the number of bytes saved,
+ * or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
+{
+ struct xe_gt_sriov_config *config;
+ ssize_t ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !(!buf ^ !size));
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ ret = pf_validate_vf_config(gt, vfid);
+ if (!size) {
+ ret = ret ? 0 : SZ_4K;
+ } else if (!ret) {
+ if (size < SZ_4K) {
+ ret = -ENOBUFS;
+ } else {
+ config = pf_pick_vf_config(gt, vfid);
+ ret = encode_config(buf, config, false) * sizeof(u32);
+ }
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return ret;
+}
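
The save helper follows a two-step calling convention: query the required size with a NULL buffer first, then save into an allocation of at least that size (the config_blob debugfs reader below does exactly this). A minimal sketch under that convention; the wrapper name and the abbreviated error handling are illustrative only:

	/* Sketch only: query the blob size first, then save into it. */
	static int example_save_vf_config(struct xe_gt *gt, unsigned int vfid)
	{
		ssize_t size, saved;
		void *blob;

		size = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
		if (size <= 0)
			return size ?: -ENODATA;	/* 0 means nothing to save */

		blob = kzalloc(size, GFP_KERNEL);
		if (!blob)
			return -ENOMEM;

		saved = xe_gt_sriov_pf_config_save(gt, vfid, blob, size);
		/* ... hand 'saved' bytes of 'blob' over to the migration stream ... */

		kfree(blob);
		return saved < 0 ? saved : 0;
	}
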
+
+static int pf_restore_vf_config_klv(struct xe_gt *gt, unsigned int vfid,
+ u32 key, u32 len, const u32 *value)
+{
+ switch (key) {
+ case GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY:
+ if (len != GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_ctxs(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_NUM_DOORBELLS_KEY:
+ if (len != GUC_KLV_VF_CFG_NUM_DOORBELLS_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_dbs(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_EXEC_QUANTUM_KEY:
+ if (len != GUC_KLV_VF_CFG_EXEC_QUANTUM_LEN)
+ return -EBADMSG;
+ return pf_provision_exec_quantum(gt, vfid, value[0]);
+
+ case GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_KEY:
+ if (len != GUC_KLV_VF_CFG_PREEMPT_TIMEOUT_LEN)
+ return -EBADMSG;
+ return pf_provision_preempt_timeout(gt, vfid, value[0]);
+
+ /* auto-generate case statements */
+#define define_threshold_key_to_provision_case(TAG, ...) \
+ case MAKE_GUC_KLV_VF_CFG_THRESHOLD_KEY(TAG): \
+ BUILD_BUG_ON(MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) != 1u); \
+ if (len != MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG)) \
+ return -EBADMSG; \
+ return pf_provision_threshold(gt, vfid, \
+ MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG), \
+ value[0]);
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(define_threshold_key_to_provision_case)
+#undef define_threshold_key_to_provision_case
+ }
+
+ if (xe_gt_is_media_type(gt))
+ return -EKEYREJECTED;
+
+ switch (key) {
+ case GUC_KLV_VF_CFG_GGTT_SIZE_KEY:
+ if (len != GUC_KLV_VF_CFG_GGTT_SIZE_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_ggtt(gt, vfid, make_u64_from_u32(value[1], value[0]));
+
+ case GUC_KLV_VF_CFG_LMEM_SIZE_KEY:
+ if (!IS_DGFX(gt_to_xe(gt)))
+ return -EKEYREJECTED;
+ if (len != GUC_KLV_VF_CFG_LMEM_SIZE_LEN)
+ return -EBADMSG;
+ return pf_provision_vf_lmem(gt, vfid, make_u64_from_u32(value[1], value[0]));
+ }
+
+ return -EKEYREJECTED;
+}
+
+static int pf_restore_vf_config(struct xe_gt *gt, unsigned int vfid,
+ const u32 *klvs, size_t num_dwords)
+{
+ int err;
+
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ klvs += GUC_KLV_LEN_MIN;
+ num_dwords -= GUC_KLV_LEN_MIN;
+
+ if (num_dwords < len)
+ err = -EBADMSG;
+ else
+ err = pf_restore_vf_config_klv(gt, vfid, key, len, klvs);
+
+ if (err) {
+ xe_gt_sriov_dbg(gt, "restore failed on key %#x (%pe)\n", key, ERR_PTR(err));
+ return err;
+ }
+
+ klvs += len;
+ num_dwords -= len;
+ }
+
+ return pf_validate_vf_config(gt, vfid);
+}
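
Each entry in the restored stream is a KLV: a header dword with the key and length packed via GUC_KLV_0_KEY/GUC_KLV_0_LEN, followed by that many value dwords, which is what the walker above decodes. A minimal sketch of a single-entry blob built as a function-local array; the value 32 is illustrative, and a real blob needs the full KLV set for pf_validate_vf_config() to succeed:

	/* Illustrative single-KLV blob: one header dword plus one value dword. */
	u32 blob[] = {
		FIELD_PREP(GUC_KLV_0_KEY, GUC_KLV_VF_CFG_NUM_CONTEXTS_KEY) |
		FIELD_PREP(GUC_KLV_0_LEN, GUC_KLV_VF_CFG_NUM_CONTEXTS_LEN),
		32,	/* example value: number of GuC context IDs to provision */
	};
	int err;

	err = pf_restore_vf_config(gt, vfid, blob, ARRAY_SIZE(blob));
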
+
+/**
+ * xe_gt_sriov_pf_config_restore - Restore a VF provisioning config from a binary blob.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @buf: the buffer with config data
+ * @size: the size of the config data
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size)
+{
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+
+ if (!size)
+ return -ENODATA;
+
+ if (size % sizeof(u32))
+ return -EINVAL;
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_info_printer(gt);
+
+ drm_printf(&p, "restoring VF%u config:\n", vfid);
+ xe_guc_klv_print(buf, size / sizeof(u32), &p);
+ }
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_send_vf_cfg_reset(gt, vfid);
+ if (!err) {
+ pf_release_vf_config(gt, vfid);
+ err = pf_restore_vf_config(gt, vfid, buf, size / sizeof(u32));
+ }
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ return err;
+}
+
+static void fini_config(void *arg)
+{
+ struct xe_gt *gt = arg;
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ for (n = 1; n <= total_vfs; n++)
+ pf_release_vf_config(gt, n);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+}
+
+/**
+ * xe_gt_sriov_pf_config_init - Initialize SR-IOV configuration data.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
+}
+
+/**
* xe_gt_sriov_pf_config_restart - Restart SR-IOV configurations after a GT reset.
* @gt: the &xe_gt
*
@@ -2201,6 +2526,41 @@ int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p)
}
/**
+ * xe_gt_sriov_pf_config_print_lmem - Print LMEM configurations.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Print LMEM allocations across all VFs.
+ * VFs without LMEM allocation are skipped.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p)
+{
+ unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
+ const struct xe_gt_sriov_config *config;
+ char buf[10];
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 1; n <= total_vfs; n++) {
+ config = &gt->sriov.pf.vfs[n].config;
+ if (!config->lmem_obj)
+ continue;
+
+ string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2,
+ buf, sizeof(buf));
+ drm_printf(p, "VF%u:\t%zu\t(%s)\n",
+ n, config->lmem_obj->size, buf);
+ }
+
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return 0;
+}
+
+/**
* xe_gt_sriov_pf_config_print_available_ggtt - Print available GGTT ranges.
* @gt: the &xe_gt
* @p: the &drm_printer
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index 42e64769f666..513e6512a575 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -44,6 +44,9 @@ u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfi
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
u32 preempt_timeout);
+u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority);
+
u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index);
int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
@@ -54,13 +57,19 @@ int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long tim
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);
+ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size);
+int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size);
+
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_init(struct xe_gt *gt);
void xe_gt_sriov_pf_config_restart(struct xe_gt *gt);
int xe_gt_sriov_pf_config_print_ggtt(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_config_print_ctxs(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_config_print_dbs(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p);
int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
index 2d3b73d78f14..686c7b3b6d7a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config_types.h
@@ -33,6 +33,8 @@ struct xe_gt_sriov_config {
u32 exec_quantum;
/** @preempt_timeout: preemption timeout in microseconds. */
u32 preempt_timeout;
+ /** @sched_priority: scheduling priority. */
+ u32 sched_priority;
/** @thresholds: GuC thresholds for adverse events notifications. */
u32 thresholds[XE_GUC_KLV_NUM_THRESHOLDS];
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 02f7328bd6ce..1f50aec3a059 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -9,9 +9,11 @@
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
@@ -176,6 +178,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
CASE2STR(PAUSE_SEND_PAUSE);
CASE2STR(PAUSE_WAIT_GUC);
CASE2STR(PAUSE_GUC_DONE);
+ CASE2STR(PAUSE_SAVE_GUC);
CASE2STR(PAUSE_FAILED);
CASE2STR(PAUSED);
CASE2STR(RESUME_WIP);
@@ -415,6 +418,10 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
* : | : /
* : v : /
* : PAUSE_GUC_DONE o-----restart
+ * : | :
+ * : | o---<--busy :
+ * : v / / :
+ * : PAUSE_SAVE_GUC :
* : / :
* : / :
* :....o..............o...............o...........:
@@ -434,6 +441,7 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC);
}
}
@@ -464,12 +472,41 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid)
pf_enter_vf_pause_failed(gt, vfid);
}
+static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
+ return false;
+
+ err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid);
+ if (err) {
+ /* retry if busy */
+ if (err == -EBUSY) {
+ pf_enter_vf_pause_save_guc(gt, vfid);
+ return true;
+ }
+ /* give up on error */
+ if (err == -EIO)
+ pf_enter_vf_mismatch(gt, vfid);
+ }
+
+ pf_enter_vf_pause_completed(gt, vfid);
+ return true;
+}
+
static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
{
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
return false;
- pf_enter_vf_pause_completed(gt, vfid);
+ pf_enter_vf_pause_save_guc(gt, vfid);
return true;
}
@@ -1008,7 +1045,7 @@ static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
return false;
- /* XXX: placeholder */
+ xe_gt_sriov_pf_sanitize_hw(gt, vfid);
pf_enter_vf_flr_send_finish(gt, vfid);
return true;
@@ -1338,6 +1375,9 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid)
if (pf_exit_vf_pause_guc_done(gt, vfid))
return true;
+ if (pf_exit_vf_pause_save_guc(gt, vfid))
+ return true;
+
if (pf_exit_vf_resume_send_resume(gt, vfid))
return true;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
index 11830aafea45..f02f941b4ad2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
@@ -27,6 +27,7 @@
* @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
* @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
* @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state.
* @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
* @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
* @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
@@ -56,6 +57,7 @@ enum xe_gt_sriov_control_bits {
XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
+ XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC,
XE_GT_SRIOV_STATE_PAUSE_FAILED,
XE_GT_SRIOV_STATE_PAUSED,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 2290ddaf9594..0fe47f41b63c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -17,6 +17,7 @@
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_debugfs.h"
#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_pf_service.h"
@@ -50,27 +51,18 @@ static unsigned int extract_vfid(struct dentry *d)
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
- * │   │   ├── ggtt_available
- * │   │   ├── ggtt_provisioned
* │   │   ├── contexts_provisioned
* │   │   ├── doorbells_provisioned
* │   │   ├── runtime_registers
* │   │   ├── negotiated_versions
* │   │   ├── adverse_events
+ * ├── gt1
+ * │   ├── pf
+ * │   │   ├── ...
*/
static const struct drm_info_list pf_info[] = {
{
- "ggtt_available",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_available_ggtt,
- },
- {
- "ggtt_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_ggtt,
- },
- {
"contexts_provisioned",
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_pf_config_print_ctxs,
@@ -101,6 +93,42 @@ static const struct drm_info_list pf_info[] = {
* /sys/kernel/debug/dri/0/
* ├── gt0
* │   ├── pf
+ * │   │   ├── ggtt_available
+ * │   │   ├── ggtt_provisioned
+ */
+
+static const struct drm_info_list pf_ggtt_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_available_ggtt,
+ },
+ {
+ "ggtt_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_ggtt,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
+ * │   │   ├── lmem_provisioned
+ */
+
+static const struct drm_info_list pf_lmem_info[] = {
+ {
+ "lmem_provisioned",
+ .show = xe_gt_debugfs_simple_show,
+ .data = xe_gt_sriov_pf_config_print_lmem,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── pf
* │   │   ├── reset_engine
* │   │   ├── sample_period
* │   │   ├── sched_if_idle
@@ -158,6 +186,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_spare
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
* │   ├── vf1
* │   │   ├── ggtt_quota
* │   │   ├── lmem_quota
@@ -165,6 +194,7 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
* │   │   ├── contexts_quota
* │   │   ├── exec_quantum_ms
* │   │   ├── preempt_timeout_us
+ * │   │   ├── sched_priority
*/
#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
@@ -203,6 +233,7 @@ DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
+DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n");
/*
* /sys/kernel/debug/dri/0/
@@ -289,6 +320,8 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
&exec_quantum_fops);
debugfs_create_file_unsafe("preempt_timeout_us", 0644, parent, parent,
&preempt_timeout_fops);
+ debugfs_create_file_unsafe("sched_priority", 0644, parent, parent,
+ &sched_priority_fops);
/* register all threshold attributes */
#define register_threshold_attribute(TAG, NAME, ...) \
@@ -312,6 +345,9 @@ static const struct {
{ "stop", xe_gt_sriov_pf_control_stop_vf },
{ "pause", xe_gt_sriov_pf_control_pause_vf },
{ "resume", xe_gt_sriov_pf_control_resume_vf },
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+ { "restore!", xe_gt_sriov_pf_migration_restore_guc_state },
+#endif
};
static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
@@ -375,6 +411,119 @@ static const struct file_operations control_ops = {
.llseek = default_llseek,
};
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── vf1
+ * │   │   ├── guc_state
+ */
+static ssize_t guc_state_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+
+ return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos);
+}
+
+static ssize_t guc_state_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+
+ if (*pos)
+ return -EINVAL;
+
+ return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count);
+}
+
+static const struct file_operations guc_state_ops = {
+ .owner = THIS_MODULE,
+ .read = guc_state_read,
+ .write = guc_state_write,
+ .llseek = default_llseek,
+};
+
+/*
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0
+ * │   ├── vf1
+ * │   │   ├── config_blob
+ */
+static ssize_t config_blob_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+ ssize_t ret;
+ void *tmp;
+
+ ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
+ if (!ret)
+ return -ENODATA;
+ if (ret < 0)
+ return ret;
+
+ tmp = kzalloc(ret, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = xe_gt_sriov_pf_config_save(gt, vfid, tmp, ret);
+ if (ret > 0)
+ ret = simple_read_from_buffer(buf, count, pos, tmp, ret);
+
+ kfree(tmp);
+ return ret;
+}
+
+static ssize_t config_blob_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file);
+ struct dentry *parent = dent->d_parent;
+ struct xe_gt *gt = extract_gt(parent);
+ unsigned int vfid = extract_vfid(parent);
+ ssize_t ret;
+ void *tmp;
+
+ if (*pos)
+ return -EINVAL;
+
+ if (!count)
+ return -ENODATA;
+
+ if (count > SZ_4K)
+ return -EINVAL;
+
+ tmp = kzalloc(count, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (copy_from_user(tmp, buf, count)) {
+ ret = -EFAULT;
+ } else {
+ ret = xe_gt_sriov_pf_config_restore(gt, vfid, tmp, count);
+ if (!ret)
+ ret = count;
+ }
+ kfree(tmp);
+ return ret;
+}
+
+static const struct file_operations config_blob_ops = {
+ .owner = THIS_MODULE,
+ .read = config_blob_read,
+ .write = config_blob_write,
+ .llseek = default_llseek,
+};
+
/**
* xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
* @gt: the &xe_gt to register
@@ -405,6 +554,16 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pfdentry->d_inode->i_private = gt;
drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
+ if (!xe_gt_is_media_type(gt)) {
+ drm_debugfs_create_files(pf_ggtt_info,
+ ARRAY_SIZE(pf_ggtt_info),
+ pfdentry, minor);
+ if (IS_DGFX(gt_to_xe(gt)))
+ drm_debugfs_create_files(pf_lmem_info,
+ ARRAY_SIZE(pf_lmem_info),
+ pfdentry, minor);
+ }
+
pf_add_policy_attrs(gt, pfdentry);
pf_add_config_attrs(gt, pfdentry, PFID);
@@ -423,5 +582,15 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
pf_add_config_attrs(gt, vfdentry, VFID(n));
debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops);
+
+ /* for testing/debugging purposes only! */
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ debugfs_create_file("guc_state",
+ IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
+ vfdentry, NULL, &guc_state_ops);
+ debugfs_create_file("config_blob",
+ IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
+ vfdentry, NULL, &config_blob_ops);
+ }
}
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
index 0bf12d89ceb2..6af219d93c3b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_helpers.h
@@ -18,7 +18,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_gt_sriov_pf_assert_vfid(gt, vfid) xe_sriov_pf_assert_vfid(gt_to_xe(gt), (vfid))
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
new file mode 100644
index 000000000000..c712111aa30d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_sriov_abi.h"
+#include "xe_bo.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_migration.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_sriov.h"
+
+/* Return: number of dwords saved/restored/required or a negative error code on failure */
+static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
+ u64 addr, u32 ndwords)
+{
+ u32 request[PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_PF2GUC_SAVE_RESTORE_VF) |
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_0_OPCODE, opcode),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_1_VFID, vfid),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_2_ADDR_LO, lower_32_bits(addr)),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_3_ADDR_HI, upper_32_bits(addr)),
+ FIELD_PREP(PF2GUC_SAVE_RESTORE_VF_REQUEST_MSG_4_SIZE, ndwords),
+ };
+
+ return xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
+}
+
+/* Return: size of the state in dwords or a negative error code on failure */
+static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ ret = guc_action_vf_save_restore(&gt->uc.guc, vfid, GUC_PF_OPCODE_VF_SAVE, 0, 0);
+ return ret ?: -ENODATA;
+}
+
+/* Return: number of state dwords saved or a negative error code on failure */
+static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
+ void *buff, size_t size)
+{
+ const int ndwords = size / sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ xe_gt_assert(gt, size % sizeof(u32) == 0);
+ xe_gt_assert(gt, size == ndwords * sizeof(u32));
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE,
+ xe_bo_ggtt_addr(bo), ndwords);
+ if (!ret)
+ ret = -ENODATA;
+ else if (ret > ndwords)
+ ret = -EPROTO;
+ else if (ret > 0)
+ xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32));
+
+ xe_bo_unpin_map_no_vm(bo);
+ return ret;
+}
+
+/* Return: number of state dwords restored or a negative error code on failure */
+static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
+ const void *buff, size_t size)
+{
+ const int ndwords = size / sizeof(u32);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_bo *bo;
+ int ret;
+
+ xe_gt_assert(gt, size % sizeof(u32) == 0);
+ xe_gt_assert(gt, size == ndwords * sizeof(u32));
+
+ bo = xe_bo_create_pin_map(xe, tile, NULL,
+ ALIGN(size, PAGE_SIZE),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size);
+
+ ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE,
+ xe_bo_ggtt_addr(bo), ndwords);
+ if (!ret)
+ ret = -ENODATA;
+ else if (ret > ndwords)
+ ret = -EPROTO;
+
+ xe_bo_unpin_map_no_vm(bo);
+ return ret;
+}
+
+static bool pf_migration_supported(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ return gt->sriov.pf.migration.supported;
+}
+
+static struct mutex *pf_migration_mutex(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ return &gt->sriov.pf.migration.snapshot_lock;
+}
+
+static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
+ unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+ lockdep_assert_held(pf_migration_mutex(gt));
+
+ return &gt->sriov.pf.vfs[vfid].snapshot;
+}
+
+static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+{
+ return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
+}
+
+static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ drmm_kfree(&xe->drm, snapshot->guc.buff);
+ snapshot->guc.buff = NULL;
+ snapshot->guc.size = 0;
+}
+
+static int pf_alloc_guc_state(struct xe_gt *gt,
+ struct xe_gt_sriov_state_snapshot *snapshot,
+ size_t size)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ void *p;
+
+ pf_free_guc_state(gt, snapshot);
+
+ if (!size)
+ return -ENODATA;
+
+ if (size % sizeof(u32))
+ return -EINVAL;
+
+ if (size > SZ_2M)
+ return -EFBIG;
+
+ p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ snapshot->guc.buff = p;
+ snapshot->guc.size = size;
+ return 0;
+}
+
+static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+{
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);
+
+ xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
+ vfid, snapshot->guc.size / sizeof(u32));
+ print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
+ snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
+ }
+}
+
+static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
+ size_t size;
+ int ret;
+
+ ret = pf_send_guc_query_vf_state_size(gt, vfid);
+ if (ret < 0)
+ goto fail;
+ size = ret * sizeof(u32);
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
+
+ ret = pf_alloc_guc_state(gt, snapshot, size);
+ if (ret < 0)
+ goto fail;
+
+ ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
+ if (ret < 0)
+ goto fail;
+ size = ret * sizeof(u32);
+ xe_gt_assert(gt, size);
+ xe_gt_assert(gt, size <= snapshot->guc.size);
+ snapshot->guc.size = size;
+
+ pf_dump_guc_state(gt, snapshot);
+ return 0;
+
+fail:
+ xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
+ pf_free_guc_state(gt, snapshot);
+ return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ err = pf_save_vf_guc_state(gt, vfid);
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return err;
+}
+
+static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
+ int ret;
+
+ if (!snapshot->guc.size)
+ return -ENODATA;
+
+ xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
+ snapshot->guc.size / sizeof(u32), vfid);
+ ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
+ if (ret < 0)
+ goto fail;
+
+ xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
+ return 0;
+
+fail:
+ xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
+ return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ ret = pf_restore_vf_guc_state(gt, vfid);
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return ret;
+}
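
The save and restore helpers are meant to be used as a pair: the snapshot captured while the VF is paused is what gets pushed back to the GuC before the VF runs again. A minimal sketch of that pairing, assuming (purely for illustration) that both steps run on the same @gt; in a real migration the snapshot would travel with the VM image:

	/* Sketch only: capture the GuC state of a paused VF, later push it back. */
	err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid);
	if (err)
		return err;

	/* ... transfer the snapshot alongside the other VF resources ... */

	err = xe_gt_sriov_pf_migration_restore_guc_state(gt, vfid);
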
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @buf: the user space buffer to read to
+ * @count: the maximum number of bytes to read
+ * @pos: the current position in the buffer
+ *
+ * This function is for PF only.
+ *
+ * This function reads up to @count bytes from the saved VF GuC state buffer
+ * at offset @pos into the user space address starting at @buf.
+ *
+ * Return: the number of bytes read or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
+ char __user *buf, size_t count, loff_t *pos)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot;
+ ssize_t ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ snapshot = pf_pick_vf_snapshot(gt, vfid);
+ if (snapshot->guc.size)
+ ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
+ snapshot->guc.size);
+ else
+ ret = -ENODATA;
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return ret;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @buf: the user space buffer with GuC VF state
+ * @size: the size of GuC VF state (in bytes)
+ *
+ * This function is for PF only.
+ *
+ * This function reads @size bytes of the VF GuC state stored at user space
+ * address @buf and writes it into an internal VF state buffer.
+ *
+ * Return: the number of bytes used or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
+ const char __user *buf, size_t size)
+{
+ struct xe_gt_sriov_state_snapshot *snapshot;
+ loff_t pos = 0;
+ ssize_t ret;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ mutex_lock(pf_migration_mutex(gt));
+ snapshot = pf_pick_vf_snapshot(gt, vfid);
+ ret = pf_alloc_guc_state(gt, snapshot, size);
+ if (!ret) {
+ ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
+ if (ret < 0)
+ pf_free_guc_state(gt, snapshot);
+ else
+ pf_dump_guc_state(gt, snapshot);
+ }
+ mutex_unlock(pf_migration_mutex(gt));
+
+ return ret;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static bool pf_check_migration_support(struct xe_gt *gt)
+{
+ /* GuC 70.25 with save/restore v2 is required */
+ xe_gt_assert(gt, GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 25, 0));
+
+ /* XXX: for now this is for feature enabling only */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_init() - Initialize support for VF migration.
+ * @gt: the &xe_gt
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ gt->sriov.pf.migration.supported = pf_check_migration_support(gt);
+
+ if (!pf_migration_supported(gt))
+ return 0;
+
+ err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
new file mode 100644
index 000000000000..09faeae00ddb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_MIGRATION_H_
+#define _XE_GT_SRIOV_PF_MIGRATION_H_
+
+#include <linux/types.h>
+
+struct xe_gt;
+
+int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
+int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
+
+#ifdef CONFIG_DEBUG_FS
+ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
+ char __user *buf, size_t count, loff_t *pos);
+ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
+ const char __user *buf, size_t count);
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
new file mode 100644
index 000000000000..1f3110b6d44f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
+#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/**
+ * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
+ *
+ * Used by the PF driver to maintain per-VF migration data.
+ */
+struct xe_gt_sriov_state_snapshot {
+ /** @guc: GuC VF state snapshot */
+ struct {
+ /** @guc.buff: buffer with the VF state */
+ u32 *buff;
+ /** @guc.size: size of the buffer (must be dword aligned) */
+ u32 size;
+ } guc;
+};
+
+/**
+ * struct xe_gt_sriov_pf_migration - GT-level VF migration data.
+ *
+ * Used by the PF driver to maintain non-VF specific per-GT data.
+ */
+struct xe_gt_sriov_pf_migration {
+ /** @supported: indicates whether the feature is supported */
+ bool supported;
+
+ /** @snapshot_lock: protects all VFs snapshots */
+ struct mutex snapshot_lock;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
index fae5be5a2a11..4445f660e6d1 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
@@ -10,6 +10,7 @@
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_policy.h"
#include "xe_gt_sriov_printk.h"
+#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
#include "xe_guc_klv_helpers.h"
#include "xe_pm.h"
@@ -34,48 +35,28 @@ static int guc_action_update_vgt_policy(struct xe_guc *guc, u64 addr, u32 size)
* Return: number of KLVs that were successfully parsed and saved,
* negative error code on failure.
*/
-static int pf_send_policy_klvs(struct xe_gt *gt, const u32 *klvs, u32 num_dwords)
+static int pf_send_policy_klvs(struct xe_gt *gt, struct xe_guc_buf buf, u32 num_dwords)
{
- const u32 bytes = num_dwords * sizeof(u32);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
struct xe_guc *guc = &gt->uc.guc;
- struct xe_bo *bo;
- int ret;
-
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(bytes, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, klvs, bytes);
-
- ret = guc_action_update_vgt_policy(guc, xe_bo_ggtt_addr(bo), num_dwords);
- xe_bo_unpin_map_no_vm(bo);
-
- return ret;
+ return guc_action_update_vgt_policy(guc, xe_guc_buf_flush(buf), num_dwords);
}
/*
* Return: 0 on success, -ENOKEY if some KLVs were not updated, -EPROTO if reply was malformed,
* negative error code on failure.
*/
-static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
- const u32 *klvs, u32 num_dwords)
+static int pf_push_policy_buf_klvs(struct xe_gt *gt, u32 num_klvs,
+ struct xe_guc_buf buf, u32 num_dwords)
{
int ret;
- xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
-
- ret = pf_send_policy_klvs(gt, klvs, num_dwords);
+ ret = pf_send_policy_klvs(gt, buf, num_dwords);
if (ret != num_klvs) {
int err = ret < 0 ? ret : ret < num_klvs ? -ENOKEY : -EPROTO;
struct drm_printer p = xe_gt_info_printer(gt);
+ void *klvs = xe_guc_buf_cpu_ptr(buf);
xe_gt_sriov_notice(gt, "Failed to push %u policy KLV%s (%pe)\n",
num_klvs, str_plural(num_klvs), ERR_PTR(err));
@@ -86,6 +67,23 @@ static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
return 0;
}
+/*
+ * Return: 0 on success, -ENOBUFS if there is no free buffer for the indirect data,
+ * negative error code on failure.
+ */
+static int pf_push_policy_klvs(struct xe_gt *gt, u32 num_klvs,
+ const u32 *klvs, u32 num_dwords)
+{
+ CLASS(xe_guc_buf_from_data, buf)(&gt->uc.guc.buf, klvs, num_dwords * sizeof(u32));
+
+ xe_gt_assert(gt, num_klvs == xe_guc_klv_count(klvs, num_dwords));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ return pf_push_policy_buf_klvs(gt, num_klvs, buf, num_dwords);
+}
+
static int pf_push_policy_u32(struct xe_gt *gt, u16 key, u32 value)
{
u32 klv[] = {
@@ -135,14 +133,33 @@ static int pf_update_policy_u32(struct xe_gt *gt, u16 key, u32 *policy, u32 valu
return 0;
}
+static void pf_bulk_reset_sched_priority(struct xe_gt *gt, u32 priority)
+{
+ unsigned int total_vfs = 1 + xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n < total_vfs; n++)
+ gt->sriov.pf.vfs[n].config.sched_priority = priority;
+}
+
static int pf_provision_sched_if_idle(struct xe_gt *gt, bool enable)
{
+ int err;
+
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
- return pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
- &gt->sriov.pf.policy.guc.sched_if_idle,
- enable);
+ err = pf_update_policy_bool(gt, GUC_KLV_VGT_POLICY_SCHED_IF_IDLE_KEY,
+ &gt->sriov.pf.policy.guc.sched_if_idle,
+ enable);
+
+ if (!err)
+ pf_bulk_reset_sched_priority(gt, enable ? GUC_SCHED_PRIORITY_NORMAL :
+ GUC_SCHED_PRIORITY_LOW);
+ return err;
}
static int pf_reprovision_sched_if_idle(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 0e23b7ea4f3e..821cfcc34e6b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -112,9 +112,7 @@ static const struct xe_reg tgl_runtime_regs[] = {
XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ats_m_runtime_regs[] = {
@@ -125,9 +123,7 @@ static const struct xe_reg ats_m_runtime_regs[] = {
XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg pvc_runtime_regs[] = {
@@ -138,9 +134,7 @@ static const struct xe_reg pvc_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xA26C) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ver_1270_runtime_regs[] = {
@@ -153,9 +147,7 @@ static const struct xe_reg ver_1270_runtime_regs[] = {
GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
};
static const struct xe_reg ver_2000_runtime_regs[] = {
@@ -171,16 +163,34 @@ static const struct xe_reg ver_2000_runtime_regs[] = {
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
- CTC_MODE, /* _MMIO(0xa26c) */
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
- TIMESTAMP_OVERRIDE, /* _MMIO(0x44074) */
+};
+
+static const struct xe_reg ver_3000_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ XEHP_FUSE4, /* _MMIO(0x9114) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ MIRROR_FUSE1, /* _MMIO(0x911c) */
+ MIRROR_L3BANK_ENABLE, /* _MMIO(0x9130) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
+ XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
+ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
+ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
+ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
{
const struct xe_reg *regs;
- if (GRAPHICS_VERx100(xe) >= 2000) {
+ if (GRAPHICS_VERx100(xe) >= 3000) {
+ *count = ARRAY_SIZE(ver_3000_runtime_regs);
+ regs = ver_3000_runtime_regs;
+ } else if (GRAPHICS_VERx100(xe) >= 2000) {
*count = ARRAY_SIZE(ver_2000_runtime_regs);
regs = ver_2000_runtime_regs;
} else if (GRAPHICS_VERx100(xe) >= 1270) {
@@ -237,7 +247,7 @@ static void read_many(struct xe_gt *gt, unsigned int count,
const struct xe_reg *regs, u32 *values)
{
while (count--)
- *values++ = xe_mmio_read32(gt, *regs++);
+ *values++ = xe_mmio_read32(&gt->mmio, *regs++);
}
static void pf_prepare_runtime_info(struct xe_gt *gt)
@@ -402,7 +412,7 @@ static int pf_service_runtime_query(struct xe_gt *gt, u32 start, u32 limit,
for (i = 0; i < count; ++i, ++data) {
addr = runtime->regs[start + i].addr;
- data->offset = xe_mmio_adjusted_addr(gt, addr);
+ data->offset = xe_mmio_adjusted_addr(&gt->mmio, addr);
data->value = runtime->values[start + i];
}
@@ -513,7 +523,7 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p
for (; size--; regs++, values++) {
drm_printf(p, "reg[%#x] = %#x\n",
- xe_mmio_adjusted_addr(gt, regs->addr), *values);
+ xe_mmio_adjusted_addr(&gt->mmio, regs->addr), *values);
}
return 0;
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 28e1b130bf87..a64a6835ad65 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -10,6 +10,7 @@
#include "xe_gt_sriov_pf_config_types.h"
#include "xe_gt_sriov_pf_control_types.h"
+#include "xe_gt_sriov_pf_migration_types.h"
#include "xe_gt_sriov_pf_monitor_types.h"
#include "xe_gt_sriov_pf_policy_types.h"
#include "xe_gt_sriov_pf_service_types.h"
@@ -29,20 +30,35 @@ struct xe_gt_sriov_metadata {
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
+
+ /** @snapshot: snapshot of the VF state data */
+ struct xe_gt_sriov_state_snapshot snapshot;
+};
+
+/**
+ * struct xe_gt_sriov_pf_workers - GT level workers used by the PF.
+ */
+struct xe_gt_sriov_pf_workers {
+ /** @restart: worker that executes actions post GT reset */
+ struct work_struct restart;
};
/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
+ * @workers: workers data.
* @service: service data.
* @control: control data.
* @policy: policy data.
+ * @migration: migration data.
* @spare: PF-only provisioning configuration.
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
+ struct xe_gt_sriov_pf_workers workers;
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
+ struct xe_gt_sriov_pf_migration migration;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index 4ebc82e607af..a439261bf4d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -27,6 +27,7 @@
#include "xe_guc_relay.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_sriov_vf.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -46,17 +47,40 @@ static int guc_action_vf_reset(struct xe_guc *guc)
return ret > 0 ? -EPROTO : ret;
}
+#define GUC_RESET_VF_STATE_RETRY_MAX 10
static int vf_reset_guc_state(struct xe_gt *gt)
{
+ unsigned int retry = GUC_RESET_VF_STATE_RETRY_MAX;
struct xe_guc *guc = &gt->uc.guc;
int err;
- err = guc_action_vf_reset(guc);
+ do {
+ err = guc_action_vf_reset(guc);
+ if (!err || err != -ETIMEDOUT)
+ break;
+ } while (--retry);
+
if (unlikely(err))
xe_gt_sriov_err(gt, "Failed to reset GuC state (%pe)\n", ERR_PTR(err));
return err;
}
+/**
+ * xe_gt_sriov_vf_reset - Reset GuC VF internal state.
+ * @gt: the &xe_gt
+ *
+ * It requires functional `GuC MMIO based communication`_.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_vf_reset(struct xe_gt *gt)
+{
+ if (!xe_device_uc_enabled(gt_to_xe(gt)))
+ return -ENODEV;
+
+ return vf_reset_guc_state(gt);
+}
+
static int guc_action_match_version(struct xe_guc *guc,
u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
u32 *branch, u32 *major, u32 *minor, u32 *patch)
@@ -212,6 +236,9 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
{
int err;
+ if (!xe_device_uc_enabled(gt_to_xe(gt)))
+ return -ENODEV;
+
err = vf_reset_guc_state(gt);
if (unlikely(err))
return err;
@@ -223,6 +250,44 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
+{
+ u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
+ FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+ FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_VF2GUC_NOTIFY_RESFIX_DONE),
+ };
+ int ret;
+
+ ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));
+
+ return ret > 0 ? -EPROTO : ret;
+}
+
+/**
+ * xe_gt_sriov_vf_notify_resfix_done - Notify GuC that resource fixups have been applied.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
+{
+ struct xe_guc *guc = &gt->uc.guc;
+ int err;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ err = guc_action_vf_notify_resfix_done(guc);
+ if (unlikely(err))
+ xe_gt_sriov_err(gt, "Failed to notify GuC about resource fixup done (%pe)\n",
+ ERR_PTR(err));
+ else
+ xe_gt_sriov_dbg_verbose(gt, "sent GuC resource fixup done\n");
+
+ return err;
+}
+
static int guc_action_query_single_klv(struct xe_guc *guc, u32 key,
u32 *value, u32 value_len)
{
@@ -692,6 +757,30 @@ failed:
return err;
}
+/**
+ * xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
+ * or just mark that a GuC is ready for it.
+ * @gt: the &xe_gt struct instance linked to target GuC
+ *
+ * This function shall be called only by VF.
+ */
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+
+ set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
+ /*
+ * We need to be certain that if all flags were set, at least one
+ * thread will notice that and schedule the recovery.
+ */
+ smp_mb__after_atomic();
+
+ xe_gt_sriov_info(gt, "ready for recovery after migration\n");
+ xe_sriov_vf_start_migration_recovery(xe);
+}
+
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
{
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -881,7 +970,7 @@ static struct vf_runtime_reg *vf_lookup_reg(struct xe_gt *gt, u32 addr)
*/
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
{
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
struct vf_runtime_reg *rr;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -917,7 +1006,7 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg)
*/
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(&gt->mmio, reg.addr);
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
xe_gt_assert(gt, !reg.vf);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index e541ce57bec2..ba6c5d74e326 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -12,11 +12,14 @@ struct drm_printer;
struct xe_gt;
struct xe_reg;
+int xe_gt_sriov_vf_reset(struct xe_gt *gt);
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
+int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
+void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
index f3ddcbefc6bc..2ed5b6780d30 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_debugfs.c
@@ -33,7 +33,7 @@ static const struct drm_info_list vf_info[] = {
.show = xe_gt_debugfs_simple_show,
.data = xe_gt_sriov_vf_print_version,
},
-#if defined(CONFIG_DRM_XE_DEBUG) || defined(CONFIG_DRM_XE_DEBUG_SRIOV)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) || IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)
{
"runtime_regs",
.show = xe_gt_debugfs_simple_show,
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index c7364a5aef8f..30f942671c2b 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -12,7 +12,7 @@
/**
* xe_gt_stats_incr - Increments the specified stats counter
- * @gt: graphics tile
+ * @gt: GT structure
* @id: xe_gt_stats_id type id that needs to be incremented
* @incr: value to be incremented with
*
@@ -23,16 +23,19 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
if (id >= __XE_GT_STATS_NUM_IDS)
return;
- atomic_add(incr, &gt->stats.counters[id]);
+ atomic64_add(incr, &gt->stats.counters[id]);
}
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
+ "svm_pagefault_count",
"tlb_inval_count",
+ "vma_pagefault_count",
+ "vma_pagefault_kb",
};
/**
* xe_gt_stats_print_info - Print the GT stats
- * @gt: graphics tile
+ * @gt: GT structure
* @p: drm_printer where it will be printed out.
*
* This prints out all the available GT stats.
@@ -42,8 +45,8 @@ int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
enum xe_gt_stats_id id;
for (id = 0; id < __XE_GT_STATS_NUM_IDS; ++id)
- drm_printf(p, "%s: %d\n", stat_description[id],
- atomic_read(&gt->stats.counters[id]));
+ drm_printf(p, "%s: %lld\n", stat_description[id],
+ atomic64_read(&gt->stats.counters[id]));
return 0;
}
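
With the counters widened to atomic64_t and the new IDs added, a typical increment site looks like the sketch below; the fault-handler context, the fault_len variable and the SZ_1K scaling are illustrative assumptions, while the IDs match xe_gt_stats_types.h below:

	/* Sketch only: bump the per-GT pagefault counters from a fault handler. */
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
	xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, fault_len / SZ_1K);
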
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
index 91d944f6c4e4..38325ef53617 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -6,15 +6,11 @@
#ifndef _XE_GT_STATS_H_
#define _XE_GT_STATS_H_
+#include "xe_gt_stats_types.h"
+
struct xe_gt;
struct drm_printer;
-enum xe_gt_stats_id {
- XE_GT_STATS_ID_TLB_INVAL,
- /* must be the last entry */
- __XE_GT_STATS_NUM_IDS,
-};
-
#ifdef CONFIG_DEBUG_FS
int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
new file mode 100644
index 000000000000..be3244d7133c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_STATS_TYPES_H_
+#define _XE_GT_STATS_TYPES_H_
+
+enum xe_gt_stats_id {
+ XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_TLB_INVAL,
+ XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
+ /* must be the last entry */
+ __XE_GT_STATS_NUM_IDS,
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index 25963e33a383..aa962c783cdf 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -8,6 +8,7 @@
#include <regs/xe_gt_regs.h>
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle.h"
#include "xe_mmio.h"
@@ -41,9 +42,9 @@ u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt)
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_gt_is_media_type(gt))
- reg = xe_mmio_read32(gt, MTL_MEDIA_PERF_LIMIT_REASONS);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_PERF_LIMIT_REASONS);
else
- reg = xe_mmio_read32(gt, GT0_PERF_LIMIT_REASONS);
+ reg = xe_mmio_read32(&gt->mmio, GT0_PERF_LIMIT_REASONS);
xe_pm_runtime_put(gt_to_xe(gt));
return reg;
@@ -53,6 +54,7 @@ static u32 read_status(struct xe_gt *gt)
{
u32 status = xe_gt_throttle_get_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK;
+ xe_gt_dbg(gt, "throttle reasons: 0x%08x\n", status);
return status;
}
@@ -112,115 +114,115 @@ static u32 read_reason_vr_tdc(struct xe_gt *gt)
return tdc;
}
-static ssize_t status_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t status_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool status = !!read_status(gt);
return sysfs_emit(buff, "%u\n", status);
}
-static DEVICE_ATTR_RO(status);
+static struct kobj_attribute attr_status = __ATTR_RO(status);
-static ssize_t reason_pl1_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl1_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl1 = !!read_reason_pl1(gt);
return sysfs_emit(buff, "%u\n", pl1);
}
-static DEVICE_ATTR_RO(reason_pl1);
+static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1);
-static ssize_t reason_pl2_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl2_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl2 = !!read_reason_pl2(gt);
return sysfs_emit(buff, "%u\n", pl2);
}
-static DEVICE_ATTR_RO(reason_pl2);
+static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2);
-static ssize_t reason_pl4_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_pl4_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool pl4 = !!read_reason_pl4(gt);
return sysfs_emit(buff, "%u\n", pl4);
}
-static DEVICE_ATTR_RO(reason_pl4);
+static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4);
-static ssize_t reason_thermal_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_thermal_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermal = !!read_reason_thermal(gt);
return sysfs_emit(buff, "%u\n", thermal);
}
-static DEVICE_ATTR_RO(reason_thermal);
+static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal);
-static ssize_t reason_prochot_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_prochot_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool prochot = !!read_reason_prochot(gt);
return sysfs_emit(buff, "%u\n", prochot);
}
-static DEVICE_ATTR_RO(reason_prochot);
+static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot);
-static ssize_t reason_ratl_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_ratl_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool ratl = !!read_reason_ratl(gt);
return sysfs_emit(buff, "%u\n", ratl);
}
-static DEVICE_ATTR_RO(reason_ratl);
+static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl);
-static ssize_t reason_vr_thermalert_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_thermalert_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool thermalert = !!read_reason_vr_thermalert(gt);
return sysfs_emit(buff, "%u\n", thermalert);
}
-static DEVICE_ATTR_RO(reason_vr_thermalert);
+static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert);
-static ssize_t reason_vr_tdc_show(struct device *dev,
- struct device_attribute *attr,
- char *buff)
+static ssize_t reason_vr_tdc_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_gt *gt = dev_to_gt(dev);
bool tdc = !!read_reason_vr_tdc(gt);
return sysfs_emit(buff, "%u\n", tdc);
}
-static DEVICE_ATTR_RO(reason_vr_tdc);
+static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc);
static struct attribute *throttle_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_reason_pl1.attr,
- &dev_attr_reason_pl2.attr,
- &dev_attr_reason_pl4.attr,
- &dev_attr_reason_thermal.attr,
- &dev_attr_reason_prochot.attr,
- &dev_attr_reason_ratl.attr,
- &dev_attr_reason_vr_thermalert.attr,
- &dev_attr_reason_vr_tdc.attr,
+ &attr_status.attr,
+ &attr_reason_pl1.attr,
+ &attr_reason_pl2.attr,
+ &attr_reason_pl4.attr,
+ &attr_reason_thermal.attr,
+ &attr_reason_prochot.attr,
+ &attr_reason_ratl.attr,
+ &attr_reason_vr_thermalert.attr,
+ &attr_reason_vr_tdc.attr,
NULL
};
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index bbb9e411d21f..e1362e608146 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -65,6 +65,14 @@ invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fe
__invalidation_fence_signal(xe, fence);
}
+void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->gt))
+ return;
+
+ __invalidation_fence_signal(gt_to_xe(fence->gt), fence);
+}
+
static void xe_gt_tlb_fence_timeout(struct work_struct *work)
{
struct xe_gt *gt = container_of(work, struct xe_gt,
@@ -72,6 +80,8 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
struct xe_device *xe = gt_to_xe(gt);
struct xe_gt_tlb_invalidation_fence *fence, *next;
+ LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
+
spin_lock_irq(&gt->tlb_invalidation.pending_lock);
list_for_each_entry_safe(fence, next,
&gt->tlb_invalidation.pending_fences, link) {
@@ -96,15 +106,15 @@ static void xe_gt_tlb_fence_timeout(struct work_struct *work)
}
/**
- * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
- * @gt: graphics tile
+ * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
+ * @gt: GT structure
*
* Initialize GT TLB invalidation state, purely software initialization, should
* be called once during driver load.
*
* Return: 0 on success, negative error code on error.
*/
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
{
gt->tlb_invalidation.seqno = 1;
INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
@@ -118,7 +128,7 @@ int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
/**
* xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
- * @gt: graphics tile
+ * @gt: GT structure
*
* Signal any pending invalidation fences, should be called during a GT reset
*/
@@ -128,6 +138,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
int pending_seqno;
/*
+ * we can get here before the CTs are even initialized if we're wedging
+ * very early, in which case there are not going to be any pending
+ * fences so we can bail immediately.
+ */
+ if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
+ return;
+
+ /*
* CT channel is already disabled at this point. No new TLB requests can
* appear.
*/
@@ -234,7 +252,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
/**
* xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
*
@@ -251,14 +269,23 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
0, /* seqno, replaced in send_tlb_invalidation */
MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
};
+ int ret;
- return send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
+ ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
+ ARRAY_SIZE(action));
+ /*
+ * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
+ * should be nuked on a GT reset so this error can be ignored.
+ */
+ if (ret == -ECANCELED)
+ return 0;
+
+ return ret;
}
/**
* xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
- * @gt: graphics tile
+ * @gt: GT structure
*
* Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
* synchronous.
@@ -268,6 +295,7 @@ static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
gt->uc.guc.submission_state.enabled) {
@@ -281,30 +309,39 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
xe_gt_tlb_invalidation_fence_wait(&fence);
} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+ struct xe_mmio *mmio = &gt->mmio;
+
if (IS_SRIOV_VF(xe))
return 0;
- xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
PVC_GUC_TLB_INV_DESC0_VALID);
} else {
- xe_mmio_write32(gt, GUC_TLB_INV_CR,
+ xe_mmio_write32(mmio, GUC_TLB_INV_CR,
GUC_TLB_INV_CR_INVALIDATE);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
return 0;
}
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
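(Editorial note, not part of the patch: on a 64-bit build ULONG_MAX is 2^64 - 1, so MAX_RANGE_TLB_INVALIDATION_LENGTH evaluates to 2^63 and the cap is effectively never hit for GPU VA ranges; on a 32-bit build it evaluates to 2^31, so ranges larger than 2 GiB skip the per-range path and fall back to a full TLB invalidation, as the length check added below shows.)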
+
/**
* xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
* address range
*
- * @gt: graphics tile
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion
* @start: start address
@@ -324,6 +361,7 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_device *xe = gt_to_xe(gt);
#define MAX_TLB_INVALIDATION_LEN 7
u32 action[MAX_TLB_INVALIDATION_LEN];
+ u64 length = end - start;
int len = 0;
xe_gt_assert(gt, fence);
@@ -336,11 +374,11 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation) {
+ if (!xe->info.has_range_tlb_invalidation ||
+ length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
} else {
u64 orig_start = start;
- u64 length = end - start;
u64 align;
if (length < SZ_4K)
@@ -389,8 +427,30 @@ int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
}
/**
- * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+ * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
* @gt: graphics tile
+ * @vm: VM to invalidate
+ *
+ * Invalidate entire VM's address space
+ */
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
+{
+ struct xe_gt_tlb_invalidation_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+ int ret;
+
+ xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
+
+ ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
+ if (ret < 0)
+ return;
+
+ xe_gt_tlb_invalidation_fence_wait(&fence);
+}
+
+/**
+ * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
+ * @gt: GT structure
* @fence: invalidation fence which will be signal on TLB invalidation
* completion, can be NULL
* @vma: VMA to invalidate
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index f430d5797af7..abe9b03d543e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -12,14 +12,17 @@
struct xe_gt;
struct xe_guc;
+struct xe_vm;
struct xe_vma;
-int xe_gt_tlb_invalidation_init(struct xe_gt *gt);
+int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
+
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
struct xe_vma *vma);
+void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
u64 start, u64 end, u32 asid);
@@ -28,6 +31,7 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
bool stack);
+void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
static inline void
xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 0662f71c6ede..516c81e3b8dd 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -5,6 +5,7 @@
#include "xe_gt_topology.h"
+#include <generated/xe_wa_oob.h>
#include <linux/bitmap.h>
#include <linux/compiler.h>
@@ -12,6 +13,7 @@
#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_mmio.h"
+#include "xe_wa.h"
static void
load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
@@ -25,7 +27,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
va_start(argp, numregs);
for (i = 0; i < numregs; i++)
- fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
+ fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg));
va_end(argp);
bitmap_from_arr32(mask, fuse_val, numregs * 32);
@@ -35,7 +37,7 @@ static void
load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask, enum xe_gt_eu_type *eu_type)
{
struct xe_device *xe = gt_to_xe(gt);
- u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
+ u32 reg_val = xe_mmio_read32(&gt->mmio, XELP_EU_ENABLE);
u32 val = 0;
int i;
@@ -127,9 +129,31 @@ static void
load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
{
struct xe_device *xe = gt_to_xe(gt);
- u32 fuse3 = xe_mmio_read32(gt, MIRROR_FUSE3);
+ struct xe_mmio *mmio = &gt->mmio;
+ u32 fuse3 = xe_mmio_read32(mmio, MIRROR_FUSE3);
- if (GRAPHICS_VER(xe) >= 20) {
+ /*
+ * PTL platforms with media version 30.00 do not provide proper values
+ * for the media GT's L3 bank registers. Skip the readout since we
+ * don't have any way to obtain real values.
+ *
+ * This may get re-described as an official workaround in the future,
+ * but there's no tracking number assigned yet so we use a custom
+ * OOB workaround descriptor.
+ */
+ if (XE_WA(gt, no_media_l3))
+ return;
+
+ if (GRAPHICS_VER(xe) >= 30) {
+ xe_l3_bank_mask_t per_node = {};
+ u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
+ u32 mirror_l3bank_enable = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE);
+ u32 bank_val = REG_FIELD_GET(XE3_L3BANK_ENABLE, mirror_l3bank_enable);
+
+ bitmap_from_arr32(per_node, &bank_val, 32);
+ gen_l3_mask_from_pattern(xe, l3_bank_mask, per_node, 32,
+ meml3_en);
+ } else if (GRAPHICS_VER(xe) >= 20) {
xe_l3_bank_mask_t per_node = {};
u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
u32 bank_val = REG_FIELD_GET(XE2_GT_L3_MODE_MASK, fuse3);
@@ -141,7 +165,7 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
xe_l3_bank_mask_t per_node = {};
xe_l3_bank_mask_t per_mask_bit = {};
u32 meml3_en = REG_FIELD_GET(MEML3_EN_MASK, fuse3);
- u32 fuse4 = xe_mmio_read32(gt, XEHP_FUSE4);
+ u32 fuse4 = xe_mmio_read32(mmio, XEHP_FUSE4);
u32 bank_val = REG_FIELD_GET(GT_L3_EXC_MASK, fuse4);
bitmap_set_value8(per_mask_bit, 0x3, 0);
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index 746b325bbf6e..a72d26ba0653 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -25,6 +25,19 @@ void xe_gt_topology_init(struct xe_gt *gt);
void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
+/**
+ * xe_gt_topology_mask_last_dss() - Returns the index of the last DSS in a mask.
+ * @mask: Input DSS mask
+ *
+ * Return: Index of the last DSS in the input DSS mask,
+ * XE_MAX_DSS_FUSE_BITS if DSS mask is empty.
+ */
+static inline unsigned int
+xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
+{
+ return find_last_bit(mask, XE_MAX_DSS_FUSE_BITS);
+}
+
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 3d1c51de0268..7def0959da35 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -6,14 +6,15 @@
#ifndef _XE_GT_TYPES_H_
#define _XE_GT_TYPES_H_
+#include "xe_device_types.h"
#include "xe_force_wake_types.h"
#include "xe_gt_idle_types.h"
#include "xe_gt_sriov_pf_types.h"
#include "xe_gt_sriov_vf_types.h"
-#include "xe_gt_stats.h"
+#include "xe_gt_stats_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
-#include "xe_oa.h"
+#include "xe_oa_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
#include "xe_uc_types.h"
@@ -120,6 +121,8 @@ struct xe_gt {
enum xe_gt_type type;
/** @info.reference_clock: clock frequency */
u32 reference_clock;
+ /** @info.timestamp_base: GT timestamp base */
+ u32 timestamp_base;
/**
* @info.engine_mask: mask of engines present on GT. Some of
* them may be reserved in runtime and not available for user.
@@ -138,26 +141,27 @@ struct xe_gt {
/** @stats: GT stats */
struct {
/** @stats.counters: counters for various GT stats */
- atomic_t counters[__XE_GT_STATS_NUM_IDS];
+ atomic64_t counters[__XE_GT_STATS_NUM_IDS];
} stats;
#endif
/**
* @mmio: mmio info for GT. All GTs within a tile share the same
* register space, but have their own copy of GSI registers at a
- * specific offset, as well as their own forcewake handling.
+ * specific offset.
+ */
+ struct xe_mmio mmio;
+
+ /**
+ * @pm: power management info for GT. The driver uses the GT's
+ * "force wake" interface to wake up specific parts of the GT hardware
+ * from C6 sleep states and ensure the hardware remains awake while it
+ * is being actively used.
*/
struct {
- /** @mmio.fw: force wake for GT */
+ /** @pm.fw: force wake for GT */
struct xe_force_wake fw;
- /**
- * @mmio.adj_limit: adjust MMIO address if address is below this
- * value
- */
- u32 adj_limit;
- /** @mmio.adj_offset: offect to add to MMIO address when adjusting */
- u32 adj_offset;
- } mmio;
+ } pm;
/** @sriov: virtualization data related to GT */
union {
@@ -411,6 +415,16 @@ struct xe_gt {
bool oob_initialized;
} wa_active;
+ /** @tuning_active: keep track of active tunings */
+ struct {
+ /** @tuning_active.gt: bitmap with active GT tunings */
+ unsigned long *gt;
+ /** @tuning_active.engine: bitmap with active engine tunings */
+ unsigned long *engine;
+ /** @tuning_active.lrc: bitmap with active LRC tunings */
+ unsigned long *lrc;
+ } tuning_active;
+
/** @user_engines: engines present in GT and available to userspace */
struct {
/**
@@ -428,6 +442,9 @@ struct xe_gt {
/** @oa: oa observation subsystem per gt info */
struct xe_oa_gt oa;
+
+ /** @eu_stall: EU stall counters subsystem per gt info */
+ struct xe_eu_stall_gt *eu_stall;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 52df28032a6f..bac5471a1a78 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -14,6 +14,7 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
+#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
@@ -22,8 +23,11 @@
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
#include "xe_guc_ads.h"
+#include "xe_guc_buf.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_db_mgr.h"
+#include "xe_guc_engine_activity.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
@@ -42,7 +46,15 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
struct xe_bo *bo)
{
struct xe_device *xe = guc_to_xe(guc);
- u32 addr = xe_bo_ggtt_addr(bo);
+ u32 addr;
+
+ /*
+ * For most BOs, the address on the allocating tile is fine. However for
+ * some, e.g. G2G CTB, the address on a specific tile is required as it
+ * might be different for each tile. So, just always ask for the address
+ * on the target GuC.
+ */
+ addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);
/* GuC addresses above GUC_GGTT_TOP don't map through the GTT */
xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
@@ -68,7 +80,7 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
- u32 flags = 0;
+ u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;
if (!guc_to_xe(guc)->info.skip_guc_pc)
flags |= GUC_CTL_ENABLE_SLPC;
@@ -137,6 +149,34 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc)
return flags;
}
+static bool needs_wa_dual_queue(struct xe_gt *gt)
+{
+ /*
+ * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions
+ * on RCS and CCSes with different address spaces, which on DG2 is
+ * required as a WA for an HW bug.
+ */
+ if (XE_WA(gt, 22011391025))
+ return true;
+
+ /*
+ * On newer platforms, the HW has been updated to not allow parallel
+ * execution of different address spaces, so the RCS/CCS will stall the
+ * context switch if one of the other RCS/CCSes is busy with a different
+ * address space. While functionally correct, having a submission
+ * stalled on the HW limits the GuC's ability to shuffle things around and
+ * can cause complications if the non-stalled submission runs for a long
+ * time, because the GuC doesn't know that the stalled submission isn't
+ * actually running and might declare it as hung. Therefore, we enable
+ * the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS engines
+ * to move management back to the GuC.
+ */
+ if (CCS_MASK(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
+ return true;
+
+ return false;
+}
+
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
struct xe_device *xe = guc_to_xe(guc);
@@ -149,7 +189,7 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (XE_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
- if (XE_WA(gt, 22011391025))
+ if (needs_wa_dual_queue(gt))
flags |= GUC_WA_DUAL_QUEUE;
/*
@@ -236,20 +276,311 @@ static void guc_write_params(struct xe_guc *guc)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);
+ xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
+ xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]);
+}
+
+static int guc_action_register_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev,
+ u32 desc_addr, u32 buff_addr, u32 size)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_REGISTER_G2G,
+ FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) |
+ FIELD_PREP(XE_G2G_REGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_REGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_REGISTER_DEVICE, dst_dev),
+ desc_addr,
+ buff_addr,
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+ xe_assert(xe, !(size % SZ_4K));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static int guc_action_deregister_g2g_buffer(struct xe_guc *guc, u32 type, u32 dst_tile, u32 dst_dev)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 action[] = {
+ XE_GUC_ACTION_DEREGISTER_G2G,
+ FIELD_PREP(XE_G2G_DEREGISTER_TYPE, type) |
+ FIELD_PREP(XE_G2G_DEREGISTER_TILE, dst_tile) |
+ FIELD_PREP(XE_G2G_DEREGISTER_DEVICE, dst_dev),
+ };
+
+ xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));
+
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+#define G2G_DEV(gt) (((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)
+
+#define G2G_BUFFER_SIZE (SZ_4K)
+#define G2G_DESC_SIZE (64)
+#define G2G_DESC_AREA_SIZE (SZ_4K)
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ * Far <tile>.<dev>
+ * 0.0 0.1 1.0 1.1
+ * N 0.0 --/-- 00/01 02/03 04/05
+ * e 0.1 01/00 --/-- 06/07 08/09
+ * a 1.0 03/02 07/06 --/-- 10/11
+ * r 1.1 05/04 09/08 11/10 --/--
+ *
+ * Where each entry is the Rx/Tx channel id pair.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10, whereas
+ * GuC #2 talking to GuC #3 would read from #10 and write to #11.
+ */
+static unsigned int g2g_slot(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+ u32 type, u32 max_inst, bool have_dev)
+{
+ u32 near = near_tile, far = far_tile;
+ u32 idx = 0, x, y, direction;
+ int i;
+
+ if (have_dev) {
+ near = (near << 1) | near_dev;
+ far = (far << 1) | far_dev;
+ }
+
+ /* No need to send to one's self */
+ if (far == near)
+ return -1;
+
+ if (far > near) {
+ /* Top right table half */
+ x = far;
+ y = near;
+
+ /* T/R is 'forwards' direction */
+ direction = type;
+ } else {
+ /* Bottom left table half */
+ x = near;
+ y = far;
+
+ /* B/L is 'backwards' direction */
+ direction = (1 - type);
+ }
+
+ /* Count the rows prior to the target */
+ for (i = y; i > 0; i--)
+ idx += max_inst - i;
+
+ /* Count this row up to the target */
+ idx += (x - 1 - y);
+
+ /* Slots are in Rx/Tx pairs */
+ idx *= 2;
+
+ /* Pick Rx/Tx direction */
+ idx += direction;
+
+ return idx;
+}
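(Editorial illustration, not part of the patch: a small userspace sketch that mirrors the triangular indexing of g2g_slot() above and prints the Rx/Tx channel table for the two-tile, two-device example in the comment. It assumes the two CTB types, XE_G2G_TYPE_IN/OUT, are 0 and 1 and that max_inst equals the GT count (4 here); the names are hypothetical and this only checks the numbering.)

#include <stdio.h>

/* Same triangular indexing as g2g_slot() above, with devices folded into ids. */
static int slot(int near, int far, int type, int max_inst)
{
	int idx = 0, x, y, dir, i;

	if (near == far)
		return -1;

	if (far > near) {
		x = far;
		y = near;
		dir = type;		/* top-right half, 'forwards' */
	} else {
		x = near;
		y = far;
		dir = 1 - type;		/* bottom-left half, 'backwards' */
	}

	for (i = y; i > 0; i--)
		idx += max_inst - i;	/* full rows above the target row */

	idx += x - 1 - y;		/* position within the target row */

	return idx * 2 + dir;		/* slots come in Rx/Tx pairs */
}

int main(void)
{
	int near, far;

	/* -1/-1 marks the 'talking to self' diagonal (--/-- in the table above) */
	for (near = 0; near < 4; near++) {
		for (far = 0; far < 4; far++)
			printf(" %2d/%-2d", slot(near, far, 0, 4), slot(near, far, 1, 4));
		printf("\n");
	}

	return 0;
}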
+
+static int guc_g2g_register(struct xe_guc *near_guc, struct xe_gt *far_gt, u32 type, bool have_dev)
+{
+ struct xe_gt *near_gt = guc_to_gt(near_guc);
+ struct xe_device *xe = gt_to_xe(near_gt);
+ struct xe_bo *g2g_bo;
+ u32 near_tile = gt_to_tile(near_gt)->id;
+ u32 near_dev = G2G_DEV(near_gt);
+ u32 far_tile = gt_to_tile(far_gt)->id;
+ u32 far_dev = G2G_DEV(far_gt);
+ u32 max = xe->info.gt_count;
+ u32 base, desc, buf;
+ int slot;
+
+ /* G2G is not allowed between different cards */
+ xe_assert(xe, xe == gt_to_xe(far_gt));
+
+ g2g_bo = near_guc->g2g.bo;
+ xe_assert(xe, g2g_bo);
+
+ slot = g2g_slot(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+ xe_assert(xe, slot >= 0);
+
+ base = guc_bo_ggtt_addr(near_guc, g2g_bo);
+ desc = base + slot * G2G_DESC_SIZE;
+ buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE;
+
+ xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size);
+
+ return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev,
+ desc, buf, G2G_BUFFER_SIZE);
+}
+
+static void guc_g2g_deregister(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type)
+{
+ guc_action_deregister_g2g_buffer(guc, type, far_tile, far_dev);
+}
+
+static u32 guc_g2g_size(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int count = xe->info.gt_count;
+ u32 num_channels = (count * (count - 1)) / 2;
+
+ xe_assert(xe, num_channels * XE_G2G_TYPE_LIMIT * G2G_DESC_SIZE <= G2G_DESC_AREA_SIZE);
+
+ return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+}
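(Worked example, editorial note, assuming XE_G2G_TYPE_LIMIT is 2 for the IN/OUT pair: with gt_count = 2 there is a single channel, so guc_g2g_size() returns 1 * 2 * 4K + 4K = 12K; with gt_count = 4 there are 6 channels, giving 6 * 2 * 4K + 4K = 52K, and the 12 descriptors of 64 bytes each fit well within the 4K descriptor area checked by the xe_assert() above.)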
+
+static bool xe_guc_g2g_wanted(struct xe_device *xe)
+{
+ /* Can't do GuC to GuC communication if there is only one GuC */
+ if (xe->info.gt_count <= 1)
+ return false;
+
+ /* No current user */
+ return false;
+}
+
+static int guc_g2g_alloc(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ if (guc->g2g.bo)
+ return 0;
+
+ if (gt->info.id != 0) {
+ struct xe_gt *root_gt = xe_device_get_gt(xe, 0);
+ struct xe_guc *root_guc = &root_gt->uc.guc;
+ struct xe_bo *bo;
+
+ bo = xe_bo_get(root_guc->g2g.bo);
+ if (!bo)
+ return -ENODEV;
+
+ guc->g2g.bo = bo;
+ guc->g2g.owned = false;
+ return 0;
+ }
+
+ g2g_size = guc_g2g_size(guc);
+ bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+ guc->g2g.bo = bo;
+ guc->g2g.owned = true;
+
+ return 0;
+}
+
+static void guc_g2g_fini(struct xe_guc *guc)
+{
+ if (!guc->g2g.bo)
+ return;
+
+ /* Unpinning the owned object is handled by generic shutdown */
+ if (!guc->g2g.owned)
+ xe_bo_put(guc->g2g.bo);
+
+ guc->g2g.bo = NULL;
+}
+
+static int guc_g2g_start(struct xe_guc *guc)
+{
+ struct xe_gt *far_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i, j;
+ int t, err;
+ bool have_dev;
+
+ if (!guc->g2g.bo) {
+ int ret;
+
+ ret = guc_g2g_alloc(guc);
+ if (ret)
+ return ret;
+ }
+
+ /* GuC interface will need extending if more GT device types are ever created. */
+ xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA));
+
+ /* Channel numbering depends on whether there are multiple GTs per tile */
+ have_dev = xe->info.gt_count > xe->info.tile_count;
+
+ for_each_gt(far_gt, xe, i) {
+ u32 far_tile, far_dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ far_tile = gt_to_tile(far_gt)->id;
+ far_dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+ err = guc_g2g_register(guc, far_gt, t, have_dev);
+ if (err) {
+ while (--t >= 0)
+ guc_g2g_deregister(guc, far_tile, far_dev, t);
+ goto err_deregister;
+ }
+ }
+ }
+
+ return 0;
+
+err_deregister:
+ for_each_gt(far_gt, xe, j) {
+ u32 tile, dev;
+
+ if (far_gt->info.id == gt->info.id)
+ continue;
+
+ if (j >= i)
+ break;
+
+ tile = gt_to_tile(far_gt)->id;
+ dev = G2G_DEV(far_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
+ guc_g2g_deregister(guc, tile, dev, t);
+ }
+
+ return err;
}
static void guc_fini_hw(void *arg)
{
struct xe_guc *guc = arg;
struct xe_gt *gt = guc_to_gt(guc);
+ unsigned int fw_ref;
- xe_gt_WARN_ON(gt, xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_uc_fini_hw(&guc_to_gt(guc)->uc);
- xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ guc_g2g_fini(guc);
}
/**
@@ -338,6 +669,10 @@ int xe_guc_init(struct xe_guc *guc)
if (ret)
goto out;
+ ret = xe_guc_capture_init(guc);
+ if (ret)
+ goto out;
+
ret = xe_guc_ads_init(&guc->ads);
if (ret)
goto out;
@@ -411,12 +746,29 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
+ ret = xe_guc_engine_activity_init(guc);
+ if (ret)
+ return ret;
+
+ ret = xe_guc_buf_cache_init(&guc->buf);
+ if (ret)
+ return ret;
+
return xe_guc_ads_init_post_hwconfig(&guc->ads);
}
int xe_guc_post_load_init(struct xe_guc *guc)
{
+ int ret;
+
xe_guc_ads_populate_post_load(&guc->ads);
+
+ if (xe_guc_g2g_wanted(guc_to_xe(guc))) {
+ ret = guc_g2g_start(guc);
+ if (ret)
+ return ret;
+ }
+
guc->submission_state.enabled = true;
return 0;
@@ -425,6 +777,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
int xe_guc_reset(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
u32 guc_status, gdrst;
int ret;
@@ -433,15 +786,15 @@ int xe_guc_reset(struct xe_guc *guc)
if (IS_SRIOV_VF(gt_to_xe(gt)))
return xe_gt_sriov_vf_bootstrap(gt);
- xe_mmio_write32(gt, GDRST, GRDOM_GUC);
+ xe_mmio_write32(mmio, GDRST, GRDOM_GUC);
- ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
+ ret = xe_mmio_wait32(mmio, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
if (ret) {
xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst);
goto err_out;
}
- guc_status = xe_mmio_read32(gt, GUC_STATUS);
+ guc_status = xe_mmio_read32(mmio, GUC_STATUS);
if (!(guc_status & GS_MIA_IN_RESET)) {
xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n",
guc_status);
@@ -459,6 +812,7 @@ err_out:
static void guc_prepare_xfer(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = guc_to_xe(guc);
u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
@@ -473,12 +827,12 @@ static void guc_prepare_xfer(struct xe_guc *guc)
shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);
/* Must program this register before loading the ucode with DMA */
- xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);
+ xe_mmio_write32(mmio, GUC_SHIM_CONTROL, shim_flags);
- xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ xe_mmio_write32(mmio, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
/* Make sure GuC receives ARAT interrupts */
- xe_mmio_rmw32(gt, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
+ xe_mmio_rmw32(mmio, PMINTRMSK, ARAT_EXPIRED_INTRMSK, 0);
}
/*
@@ -494,7 +848,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
if (guc->fw.rsa_size > 256) {
u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
xe_uc_fw_rsa_offset(&guc->fw);
- xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
+ xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
return 0;
}
@@ -503,7 +857,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
return -ENOMEM;
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
- xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);
+ xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);
return 0;
}
@@ -583,7 +937,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
* extreme thermal throttling. And a system that is that hot during boot is probably
* dead anyway!
*/
-#if defined(CONFIG_DRM_XE_DEBUG)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define GUC_LOAD_RETRY_LIMIT 20
#else
#define GUC_LOAD_RETRY_LIMIT 3
@@ -593,6 +947,7 @@ static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
static void guc_wait_ucode(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
ktime_t before, after, delta;
int load_done;
@@ -619,7 +974,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
* timeouts rather than allowing a huge timeout each time. So basically, need
* to treat a timeout no different to a value change.
*/
- ret = xe_mmio_wait32_not(gt, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
+ ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
last_status, 1000 * 1000, &status, false);
if (ret < 0)
count++;
@@ -657,7 +1012,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
switch (bootrom) {
case XE_BOOTROM_STATUS_NO_KEY_FOUND:
xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
- xe_mmio_read32(gt, GUC_HEADER_INFO));
+ xe_mmio_read32(mmio, GUC_HEADER_INFO));
break;
case XE_BOOTROM_STATUS_RSA_FAILED:
@@ -672,7 +1027,7 @@ static void guc_wait_ucode(struct xe_guc *guc)
switch (ukernel) {
case XE_GUC_LOAD_STATUS_EXCEPTION:
xe_gt_err(gt, "firmware exception. EIP: %#x\n",
- xe_mmio_read32(gt, SOFT_SCRATCH(13)));
+ xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
break;
case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
@@ -824,10 +1179,10 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
+ msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15));
msg &= XE_GUC_RECV_MSG_EXCEPTION |
XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
- xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
+ xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0);
if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
xe_gt_err(gt, "Received early GuC crash dump notification!\n");
@@ -844,14 +1199,14 @@ static void guc_enable_irq(struct xe_guc *guc)
REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
/* Primary GuC and media GuC share a single enable bit */
- xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
+ xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE,
REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));
/*
* There are separate mask bits for primary and media GuCs, so use
* a RMW operation to avoid clobbering the other GuC's setting.
*/
- xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
+ xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0);
}
int xe_guc_enable_communication(struct xe_guc *guc)
@@ -863,7 +1218,7 @@ int xe_guc_enable_communication(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
- err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
+ err = xe_memirq_init_guc(&tile->memirq, guc);
if (err)
return err;
} else {
@@ -907,7 +1262,7 @@ void xe_guc_notify(struct xe_guc *guc)
* additional payload data to the GuC but this capability is not
* used by the firmware yet. Use default value in the meantime.
*/
- xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
+ xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data);
}
int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
@@ -925,6 +1280,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
{
struct xe_device *xe = guc_to_xe(guc);
struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_mmio *mmio = &gt->mmio;
u32 header, reply;
struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
@@ -934,7 +1290,6 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);
- xe_assert(xe, !xe_guc_ct_enabled(&guc->ct));
xe_assert(xe, len);
xe_assert(xe, len <= VF_SW_FLAG_COUNT);
xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
@@ -947,19 +1302,19 @@ retry:
/* Not in critical data-path, just do if else for GT type */
if (xe_gt_is_media_type(gt)) {
for (i = 0; i < len; ++i)
- xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
+ xe_mmio_write32(mmio, MED_VF_SW_FLAG(i),
request[i]);
- xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
+ xe_mmio_read32(mmio, MED_VF_SW_FLAG(LAST_INDEX));
} else {
for (i = 0; i < len; ++i)
- xe_mmio_write32(gt, VF_SW_FLAG(i),
+ xe_mmio_write32(mmio, VF_SW_FLAG(i),
request[i]);
- xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
+ xe_mmio_read32(mmio, VF_SW_FLAG(LAST_INDEX));
}
xe_guc_notify(guc);
- ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
+ ret = xe_mmio_wait32(mmio, reply_reg, GUC_HXG_MSG_0_ORIGIN,
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
50000, &reply, false);
if (ret) {
@@ -969,7 +1324,7 @@ timeout:
return ret;
}
- header = xe_mmio_read32(gt, reply_reg);
+ header = xe_mmio_read32(mmio, reply_reg);
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
/*
@@ -985,7 +1340,7 @@ timeout:
BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
- ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
+ ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
1000000, &header, false);
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
@@ -1032,13 +1387,14 @@ proto:
for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
reply_reg.addr += sizeof(u32);
- response_buf[i] = xe_mmio_read32(gt, reply_reg);
+ response_buf[i] = xe_mmio_read32(mmio, reply_reg);
}
}
/* Use data from the GuC response as our return value */
return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}
+ALLOW_ERROR_INJECTION(xe_guc_mmio_send_recv, ERRNO);
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
@@ -1088,10 +1444,21 @@ int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
return guc_self_cfg(guc, key, 2, val);
}
+static void xe_guc_sw_0_irq_handler(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ xe_gt_sriov_vf_migrated_event_handler(gt);
+}
+
void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
if (iir & GUC_INTR_GUC2HOST)
xe_guc_ct_irq_handler(&guc->ct);
+
+ if (iir & GUC_INTR_SW_INT_0)
+ xe_guc_sw_0_irq_handler(guc);
}
void xe_guc_sanitize(struct xe_guc *guc)
@@ -1131,51 +1498,48 @@ void xe_guc_stop(struct xe_guc *guc)
int xe_guc_start(struct xe_guc *guc)
{
- if (!IS_SRIOV_VF(guc_to_xe(guc))) {
- int err;
-
- err = xe_guc_pc_start(&guc->pc);
- xe_gt_WARN(guc_to_gt(guc), err, "Failed to start GuC PC: %pe\n",
- ERR_PTR(err));
- }
-
return xe_guc_submit_start(guc);
}
void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
struct xe_gt *gt = guc_to_gt(guc);
+ unsigned int fw_ref;
u32 status;
- int err;
int i;
xe_uc_fw_print(&guc->fw, p);
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- return;
+ if (!IS_SRIOV_VF(gt_to_xe(gt))) {
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
+
+ status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
+
+ drm_printf(p, "\nGuC status 0x%08x:\n", status);
+ drm_printf(p, "\tBootrom status = 0x%x\n",
+ REG_FIELD_GET(GS_BOOTROM_MASK, status));
+ drm_printf(p, "\tuKernel status = 0x%x\n",
+ REG_FIELD_GET(GS_UKERNEL_MASK, status));
+ drm_printf(p, "\tMIA Core status = 0x%x\n",
+ REG_FIELD_GET(GS_MIA_MASK, status));
+ drm_printf(p, "\tLog level = %d\n",
+ xe_guc_log_get_level(&guc->log));
+
+ drm_puts(p, "\nScratch registers:\n");
+ for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
+ drm_printf(p, "\t%2d: \t0x%x\n",
+ i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i)));
+ }
- status = xe_mmio_read32(gt, GUC_STATUS);
-
- drm_printf(p, "\nGuC status 0x%08x:\n", status);
- drm_printf(p, "\tBootrom status = 0x%x\n",
- REG_FIELD_GET(GS_BOOTROM_MASK, status));
- drm_printf(p, "\tuKernel status = 0x%x\n",
- REG_FIELD_GET(GS_UKERNEL_MASK, status));
- drm_printf(p, "\tMIA Core status = 0x%x\n",
- REG_FIELD_GET(GS_MIA_MASK, status));
- drm_printf(p, "\tLog level = %d\n",
- xe_guc_log_get_level(&guc->log));
-
- drm_puts(p, "\nScratch registers:\n");
- for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
- drm_printf(p, "\t%2d: \t0x%x\n",
- i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-
+ drm_puts(p, "\n");
xe_guc_ct_print(&guc->ct, p, false);
+
+ drm_puts(p, "\n");
xe_guc_submit_print(guc, p);
}
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 42116b167c98..58338be44558 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -82,4 +82,9 @@ static inline struct xe_device *guc_to_xe(struct xe_guc *guc)
return gt_to_xe(guc_to_gt(guc));
}
+static inline struct drm_device *guc_to_drm(struct xe_guc *guc)
+{
+ return &guc_to_xe(guc)->drm;
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index d1902a8581ca..44c1fa2fe7c8 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -5,6 +5,8 @@
#include "xe_guc_ads.h"
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
@@ -18,6 +20,7 @@
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
@@ -26,6 +29,7 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
+#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -149,8 +153,7 @@ static u32 guc_ads_waklv_size(struct xe_guc_ads *ads)
static size_t guc_ads_capture_size(struct xe_guc_ads *ads)
{
- /* FIXME: Allocate a proper capture list */
- return PAGE_ALIGN(PAGE_SIZE);
+ return PAGE_ALIGN(ads->capture_size);
}
static size_t guc_ads_um_queues_size(struct xe_guc_ads *ads)
@@ -229,11 +232,6 @@ static size_t guc_ads_size(struct xe_guc_ads *ads)
guc_ads_private_data_size(ads);
}
-static bool needs_wa_1607983814(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) < 1250;
-}
-
static size_t calculate_regset_size(struct xe_gt *gt)
{
struct xe_reg_sr_entry *sr_entry;
@@ -248,7 +246,7 @@ static size_t calculate_regset_size(struct xe_gt *gt)
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (needs_wa_1607983814(gt_to_xe(gt)))
+ if (XE_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
@@ -344,7 +342,7 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
offset = guc_ads_waklv_offset(ads);
remain = guc_ads_waklv_size(ads);
- if (XE_WA(gt, 14019882105))
+ if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
guc_waklv_enable_simple(ads,
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
&offset, &remain);
@@ -357,6 +355,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
&offset, &remain);
+ if (XE_WA(gt, 14022866841))
+ guc_waklv_enable_simple(ads,
+ GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
+ &offset, &remain);
+
/*
* On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
* the default value for this register is determined to be 0xC40. This could change in the
@@ -373,6 +376,11 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
&offset, &remain);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
+ guc_waklv_enable_simple(ads,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
+ &offset, &remain);
+
size = guc_ads_waklv_size(ads) - remain;
if (!size)
return;
@@ -404,13 +412,15 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
struct xe_bo *bo;
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+ ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
ads->regset_size = calculate_regset_size(gt);
ads->ads_waklv_size = calculate_waklv_size(ads);
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ads_size(ads) + MAX_GOLDEN_LRC_SIZE,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -418,14 +428,15 @@ int xe_guc_ads_init(struct xe_guc_ads *ads)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_guc_ads_init, ERRNO); /* See xe_pci_probe() */
/**
* xe_guc_ads_init_post_hwconfig - initialize ADS post hwconfig load
* @ads: Additional data structures object
*
- * Recalcuate golden_lrc_size & regset_size as the number hardware engines may
- * have changed after the hwconfig was loaded. Also verify the new sizes fit in
- * the already allocated ADS buffer object.
+ * Recalculate golden_lrc_size, capture_size and regset_size as the number of
+ * hardware engines may have changed after the hwconfig was loaded. Also verify
+ * the new sizes fit in the already allocated ADS buffer object.
*
* Return: 0 on success, negative error code on error.
*/
@@ -437,6 +448,8 @@ int xe_guc_ads_init_post_hwconfig(struct xe_guc_ads *ads)
xe_gt_assert(gt, ads->bo);
ads->golden_lrc_size = calculate_golden_lrc_size(ads);
+ /* Calculate capture size using the worst-case size */
+ ads->capture_size = xe_guc_capture_ads_input_worst_size(ads_to_guc(ads));
ads->regset_size = calculate_regset_size(gt);
xe_gt_assert(gt, ads->golden_lrc_size +
@@ -483,24 +496,52 @@ static void fill_engine_enable_masks(struct xe_gt *gt,
engine_enable_mask(gt, XE_ENGINE_CLASS_OTHER));
}
-static void guc_prep_golden_lrc_null(struct xe_guc_ads *ads)
+/*
+ * Write the offsets corresponding to the golden LRCs. The actual data is
+ * populated later by guc_golden_lrc_populate()
+ */
+static void guc_golden_lrc_init(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
offsetof(struct __guc_ads_blob, system_info));
- u8 guc_class;
+ size_t alloc_size, real_size;
+ u32 addr_ggtt, offset;
+ int class;
+
+ offset = guc_ads_golden_lrc_offset(ads);
+ addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
+
+ for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ u8 guc_class;
+
+ guc_class = xe_engine_class_to_guc_class(class);
- for (guc_class = 0; guc_class <= GUC_MAX_ENGINE_CLASSES; ++guc_class) {
if (!info_map_read(xe, &info_map,
engine_enabled_masks[guc_class]))
continue;
+ real_size = xe_gt_lrc_size(gt, class);
+ alloc_size = PAGE_ALIGN(real_size);
+
+ /*
+ * This interface is slightly confusing. We need to pass the
+ * base address of the full golden context and the size of just
+ * the engine state, which is the section of the context image
+ * that starts after the execlists LRC registers. This is
+ * required to allow the GuC to restore just the engine state
+ * when a watchdog reset occurs.
+ * We calculate the engine state size by removing the size of
+ * what comes before it in the context image (which is identical
+ * on all engines).
+ */
ads_blob_write(ads, ads.eng_state_size[guc_class],
- guc_ads_golden_lrc_size(ads) -
- xe_lrc_skip_size(xe));
+ real_size - xe_lrc_skip_size(xe));
ads_blob_write(ads, ads.golden_context_lrca[guc_class],
- xe_bo_ggtt_addr(ads->bo) +
- guc_ads_golden_lrc_offset(ads));
+ addr_ggtt);
+
+ addr_ggtt += alloc_size;
}
}
@@ -536,20 +577,148 @@ static void guc_mapping_table_init(struct xe_gt *gt,
}
}
-static void guc_capture_list_init(struct xe_guc_ads *ads)
+static u32 guc_get_capture_engine_mask(struct xe_gt *gt, struct iosys_map *info_map,
+ enum guc_capture_list_class_type capture_class)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 mask;
+
+ switch (capture_class) {
+ case GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_RENDER_CLASS]);
+ mask |= info_map_read(xe, info_map, engine_enabled_masks[GUC_COMPUTE_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_VIDEO:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEO_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_VIDEOENHANCE_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_BLITTER:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_BLITTER_CLASS]);
+ break;
+ case GUC_CAPTURE_LIST_CLASS_GSC_OTHER:
+ mask = info_map_read(xe, info_map, engine_enabled_masks[GUC_GSC_OTHER_CLASS]);
+ break;
+ default:
+ mask = 0;
+ }
+
+ return mask;
+}
+
+static inline bool get_capture_list(struct xe_guc_ads *ads, struct xe_guc *guc, struct xe_gt *gt,
+ int owner, int type, int class, u32 *total_size, size_t *size,
+ void **pptr)
+{
+ *size = 0;
+
+ if (!xe_guc_capture_getlistsize(guc, owner, type, class, size)) {
+ if (*total_size + *size > ads->capture_size)
+ xe_gt_dbg(gt, "Capture size overflow :%zu vs %d\n",
+ *total_size + *size, ads->capture_size);
+ else if (!xe_guc_capture_getlist(guc, owner, type, class, pptr))
+ return false;
+ }
+
+ return true;
+}
+
+static int guc_capture_prep_lists(struct xe_guc_ads *ads)
{
+ struct xe_guc *guc = ads_to_guc(ads);
+ struct xe_gt *gt = ads_to_gt(ads);
+ u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
+ struct iosys_map info_map;
+ size_t size = 0;
+ void *ptr;
int i, j;
- u32 addr = xe_bo_ggtt_addr(ads->bo) + guc_ads_capture_offset(ads);
- /* FIXME: Populate a proper capture list */
+ /*
+ * GuC Capture's steered reg-list needs to be allocated and initialized
+ * after the GuC hwconfig is available, which is guaranteed at this point.
+ */
+ xe_guc_capture_steered_list_init(ads_to_guc(ads));
+
+ capture_offset = guc_ads_capture_offset(ads);
+ ads_ggtt = xe_bo_ggtt_addr(ads->bo);
+ info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
+ offsetof(struct __guc_ads_blob, system_info));
+
+ /* first, set aside the first page for a capture_list with zero descriptors */
+ total_size = PAGE_SIZE;
+ if (!xe_guc_capture_getnullheader(guc, &ptr, &size))
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr, size);
+
+ null_ggtt = ads_ggtt + capture_offset;
+ capture_offset += PAGE_SIZE;
+
+ /*
+ * Populate the capture list: at this point the ADS is already allocated
+ * and mapped to the worst-case size.
+ */
for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
- for (j = 0; j < GUC_MAX_ENGINE_CLASSES; j++) {
- ads_blob_write(ads, ads.capture_instance[i][j], addr);
- ads_blob_write(ads, ads.capture_class[i][j], addr);
+ bool write_empty_list;
+
+ for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
+ u32 engine_mask = guc_get_capture_engine_mask(gt, &info_map, j);
+ /* null list if we don't have said engine or list */
+ if (!engine_mask) {
+ ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
+ ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
+ continue;
+ }
+
+ /* engine exists: start with engine-class registers */
+ write_empty_list = get_capture_list(ads, guc, gt, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ j, &total_size, &size, &ptr);
+ if (!write_empty_list) {
+ ads_blob_write(ads, ads.capture_class[i][j],
+ ads_ggtt + capture_offset);
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
+ ptr, size);
+ total_size += size;
+ capture_offset += size;
+ } else {
+ ads_blob_write(ads, ads.capture_class[i][j], null_ggtt);
+ }
+
+ /* engine exists: next, engine-instance registers */
+ write_empty_list = get_capture_list(ads, guc, gt, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+ j, &total_size, &size, &ptr);
+ if (!write_empty_list) {
+ ads_blob_write(ads, ads.capture_instance[i][j],
+ ads_ggtt + capture_offset);
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset,
+ ptr, size);
+ total_size += size;
+ capture_offset += size;
+ } else {
+ ads_blob_write(ads, ads.capture_instance[i][j], null_ggtt);
+ }
}
- ads_blob_write(ads, ads.capture_global[i], addr);
+ /* global registers are last in our PF/VF loops */
+ write_empty_list = get_capture_list(ads, guc, gt, i,
+ GUC_STATE_CAPTURE_TYPE_GLOBAL,
+ 0, &total_size, &size, &ptr);
+ if (!write_empty_list) {
+ ads_blob_write(ads, ads.capture_global[i], ads_ggtt + capture_offset);
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), capture_offset, ptr,
+ size);
+ total_size += size;
+ capture_offset += size;
+ } else {
+ ads_blob_write(ads, ads.capture_global[i], null_ggtt);
+ }
}
+
+ if (ads->capture_size != PAGE_ALIGN(total_size))
+ xe_gt_dbg(gt, "Updated ADS capture size %d (was %d)\n",
+ PAGE_ALIGN(total_size), ads->capture_size);
+ return PAGE_ALIGN(total_size);
}
static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
@@ -562,6 +731,20 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
.flags = reg.masked ? GUC_REGSET_MASKED : 0,
};
+ if (reg.mcr) {
+ struct xe_reg_mcr mcr_reg = XE_REG_MCR(reg.addr);
+ u8 group, instance;
+
+ bool steer = xe_gt_mcr_get_nonterminated_steering(ads_to_gt(ads), mcr_reg,
+ &group, &instance);
+
+ if (steer) {
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, group);
+ entry.flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, instance);
+ entry.flags |= GUC_REGSET_STEERING_NEEDED;
+ }
+ }
+
xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
&entry, sizeof(entry));
}
@@ -570,7 +753,6 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
struct iosys_map *regset_map,
struct xe_hw_engine *hwe)
{
- struct xe_device *xe = ads_to_xe(ads);
struct xe_hw_engine *hwe_rcs_reset_domain =
xe_gt_any_hw_engine_by_reset_domain(hwe->gt, XE_ENGINE_CLASS_RENDER);
struct xe_reg_sr_entry *entry;
@@ -601,8 +783,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- /* Wa_1607983814 */
- if (needs_wa_1607983814(xe) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
@@ -684,7 +865,7 @@ static void guc_doorbell_init(struct xe_guc_ads *ads)
if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
u32 distdbreg =
- xe_mmio_read32(gt, DIST_DBS_POPULATED);
+ xe_mmio_read32(&gt->mmio, DIST_DBS_POPULATED);
ads_blob_write(ads,
system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
@@ -710,7 +891,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads)
xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size);
guc_policies_init(ads);
- guc_prep_golden_lrc_null(ads);
+ guc_golden_lrc_init(ads);
guc_mapping_table_init_invalid(gt, &info_map);
guc_doorbell_init(ads);
@@ -736,9 +917,9 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_policies_init(ads);
fill_engine_enable_masks(gt, &info_map);
guc_mmio_reg_state_init(ads);
- guc_prep_golden_lrc_null(ads);
+ guc_golden_lrc_init(ads);
guc_mapping_table_init(gt, &info_map);
- guc_capture_list_init(ads);
+ guc_capture_prep_lists(ads);
guc_doorbell_init(ads);
guc_waklv_init(ads);
@@ -756,18 +937,22 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads)
guc_ads_private_data_offset(ads));
}
-static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
+/*
+ * After the golden LRCs are recorded for each engine class by the first
+ * submission, copy them to the ADS, as initialized earlier by
+ * guc_golden_lrc_init().
+ */
+static void guc_golden_lrc_populate(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
struct xe_gt *gt = ads_to_gt(ads);
struct iosys_map info_map = IOSYS_MAP_INIT_OFFSET(ads_to_map(ads),
offsetof(struct __guc_ads_blob, system_info));
size_t total_size = 0, alloc_size, real_size;
- u32 addr_ggtt, offset;
+ u32 offset;
int class;
offset = guc_ads_golden_lrc_offset(ads);
- addr_ggtt = xe_bo_ggtt_addr(ads->bo) + offset;
for (class = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
u8 guc_class;
@@ -784,26 +969,9 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
alloc_size = PAGE_ALIGN(real_size);
total_size += alloc_size;
- /*
- * This interface is slightly confusing. We need to pass the
- * base address of the full golden context and the size of just
- * the engine state, which is the section of the context image
- * that starts after the execlists LRC registers. This is
- * required to allow the GuC to restore just the engine state
- * when a watchdog reset occurs.
- * We calculate the engine state size by removing the size of
- * what comes before it in the context image (which is identical
- * on all engines).
- */
- ads_blob_write(ads, ads.eng_state_size[guc_class],
- real_size - xe_lrc_skip_size(xe));
- ads_blob_write(ads, ads.golden_context_lrca[guc_class],
- addr_ggtt);
-
xe_map_memcpy_to(xe, ads_to_map(ads), offset,
gt->default_lrc[class], real_size);
- addr_ggtt += alloc_size;
offset += alloc_size;
}
@@ -812,7 +980,7 @@ static void guc_populate_golden_lrc(struct xe_guc_ads *ads)
void xe_guc_ads_populate_post_load(struct xe_guc_ads *ads)
{
- guc_populate_golden_lrc(ads);
+ guc_golden_lrc_populate(ads);
}
static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_offset)
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
index 2de5decfe0fd..70c132458ac3 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -22,6 +22,8 @@ struct xe_guc_ads {
u32 regset_size;
/** @ads_waklv_size: total waklv size supported by platform */
u32 ads_waklv_size;
+ /** @capture_size: size of register set passed to GuC for capture */
+ u32 capture_size;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
new file mode 100644
index 000000000000..0193c94dd6a0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/cleanup.h>
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_bo.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_buf.h"
+#include "xe_sa.h"
+
+static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
+{
+ return container_of(cache, struct xe_guc, buf);
+}
+
+static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
+{
+ return guc_to_gt(cache_to_guc(cache));
+}
+
+/**
+ * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
+ * @cache: the &xe_guc_buf_cache to initialize
+ *
+ * The Buffer Cache allows obtaining a reusable buffer that can be used to pass
+ * indirect H2G data to GuC without the need to create an ad-hoc allocation.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
+{
+ struct xe_gt *gt = cache_to_gt(cache);
+ struct xe_sa_manager *sam;
+
+ /* XXX: currently it's useful only for the PF actions */
+ if (!IS_SRIOV_PF(gt_to_xe(gt)))
+ return 0;
+
+ sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
+ if (IS_ERR(sam))
+ return PTR_ERR(sam);
+ cache->sam = sam;
+
+ xe_gt_dbg(gt, "reusable buffer with %u dwords at %#x for %ps\n",
+ xe_guc_buf_cache_dwords(cache), xe_bo_ggtt_addr(sam->bo),
+ __builtin_return_address(0));
+ return 0;
+}
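+
+/*
+ * Illustrative sketch only (not part of this patch): a typical PF action flow
+ * using the cache could look roughly like the snippet below, where
+ * send_h2g_action() is a hypothetical helper and klvs/size are assumed to be
+ * provided by the caller:
+ *
+ *	struct xe_guc_buf buf = xe_guc_buf_from_data(cache, klvs, size);
+ *	int err;
+ *
+ *	if (!xe_guc_buf_is_valid(buf))
+ *		return -ENOBUFS;
+ *	err = send_h2g_action(guc, xe_guc_buf_flush(buf), size / sizeof(u32));
+ *	xe_guc_buf_release(buf);
+ *	return err;
+ */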
+
+/**
+ * xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
+ * @cache: the &xe_guc_buf_cache to query
+ *
+ * Return: the size of the largest reusable buffer (in dwords)
+ */
+u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache)
+{
+ return cache->sam ? cache->sam->base.size / sizeof(u32) : 0;
+}
+
+/**
+ * xe_guc_buf_reserve() - Reserve a new sub-allocation.
+ * @cache: the &xe_guc_buf_cache from which to reserve the sub-allocation
+ * @dwords: the requested size of the buffer in dwords
+ *
+ * Use xe_guc_buf_is_valid() to check if the returned buffer reference is valid.
+ * Must use xe_guc_buf_release() to release a sub-allocation.
+ *
+ * Return: a &xe_guc_buf reference to the new sub-allocation.
+ */
+struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords)
+{
+ struct drm_suballoc *sa;
+
+ if (cache->sam)
+ sa = __xe_sa_bo_new(cache->sam, dwords * sizeof(u32), GFP_ATOMIC);
+ else
+ sa = ERR_PTR(-EOPNOTSUPP);
+
+ return (struct xe_guc_buf){ .sa = sa };
+}
+
+/**
+ * xe_guc_buf_from_data() - Reserve a new sub-allocation using data.
+ * @cache: the &xe_guc_buf_cache from which to reserve the sub-allocation
+ * @data: the data to copy into the sub-allocation
+ * @size: the size of the data
+ *
+ * Similar to xe_guc_buf_reserve() but flushes @data to the GPU memory.
+ *
+ * Return: a &xe_guc_buf reference to the new sub-allocation.
+ */
+struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
+ const void *data, size_t size)
+{
+ struct drm_suballoc *sa;
+
+ sa = __xe_sa_bo_new(cache->sam, size, GFP_ATOMIC);
+ if (!IS_ERR(sa))
+ memcpy(xe_sa_bo_cpu_addr(sa), data, size);
+
+ return (struct xe_guc_buf){ .sa = sa };
+}
+
+/**
+ * xe_guc_buf_release() - Release a sub-allocation.
+ * @buf: the &xe_guc_buf to release
+ *
+ * Releases a sub-allocation reserved by xe_guc_buf_reserve() or xe_guc_buf_from_data().
+ */
+void xe_guc_buf_release(const struct xe_guc_buf buf)
+{
+ if (xe_guc_buf_is_valid(buf))
+ xe_sa_bo_free(buf.sa, NULL);
+}
+
+/**
+ * xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
+ * @buf: the &xe_guc_buf to flush
+ *
+ * Return: a GPU address of the sub-allocation.
+ */
+u64 xe_guc_buf_flush(const struct xe_guc_buf buf)
+{
+ xe_sa_bo_flush_write(buf.sa);
+ return xe_sa_bo_gpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_buf_cpu_ptr() - Obtain a CPU pointer to the sub-allocation.
+ * @buf: the &xe_guc_buf to query
+ *
+ * Return: a CPU pointer of the sub-allocation.
+ */
+void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf)
+{
+ return xe_sa_bo_cpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_buf_gpu_addr() - Obtain a GPU address of the sub-allocation.
+ * @buf: the &xe_guc_buf to query
+ *
+ * Return: a GPU address of the sub-allocation.
+ */
+u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf)
+{
+ return xe_sa_bo_gpu_addr(buf.sa);
+}
+
+/**
+ * xe_guc_cache_gpu_addr_from_ptr() - Lookup a GPU address using the pointer.
+ * @cache: the &xe_guc_buf_cache with sub-allocations
+ * @ptr: the CPU pointer of the sub-allocation
+ * @size: the size of the data
+ *
+ * Return: a GPU address on success or 0 if the pointer was unrelated.
+ */
+u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size)
+{
+ ptrdiff_t offset = ptr - cache->sam->cpu_ptr;
+
+ if (offset < 0 || offset + size > cache->sam->base.size)
+ return 0;
+
+ return cache->sam->gpu_addr + offset;
+}
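+
+/*
+ * Illustrative sketch only (not part of this patch): a caller that filled a
+ * reserved buffer through its CPU pointer can later translate that pointer
+ * back into the matching GGTT address without keeping the &xe_guc_buf around;
+ * cache, ptr and size below are assumed to come from such a caller:
+ *
+ *	u64 addr = xe_guc_cache_gpu_addr_from_ptr(cache, ptr, size);
+ *
+ *	if (!addr)
+ *		return -ENXIO;
+ *
+ * where a zero return means ptr does not belong to this cache.
+ */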
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_buf_kunit.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h
new file mode 100644
index 000000000000..0d67604d96bd
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_BUF_H_
+#define _XE_GUC_BUF_H_
+
+#include <linux/cleanup.h>
+#include <linux/err.h>
+
+#include "xe_guc_buf_types.h"
+
+int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache);
+u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache);
+struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords);
+struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
+ const void *data, size_t size);
+void xe_guc_buf_release(const struct xe_guc_buf buf);
+
+/**
+ * xe_guc_buf_is_valid() - Check if a buffer reference is valid.
+ * @buf: the &xe_guc_buf reference to check
+ *
+ * Return: true if @buf represents a valid sub-allocation.
+ */
+static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf)
+{
+ return !IS_ERR_OR_NULL(buf.sa);
+}
+
+void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf);
+u64 xe_guc_buf_flush(const struct xe_guc_buf buf);
+u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf);
+u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size);
+
+DEFINE_CLASS(xe_guc_buf, struct xe_guc_buf,
+ xe_guc_buf_release(_T),
+ xe_guc_buf_reserve(cache, num),
+ struct xe_guc_buf_cache *cache, u32 num);
+
+DEFINE_CLASS(xe_guc_buf_from_data, struct xe_guc_buf,
+ xe_guc_buf_release(_T),
+ xe_guc_buf_from_data(cache, data, size),
+ struct xe_guc_buf_cache *cache, const void *data, size_t size);
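+
+/*
+ * Illustrative sketch only (not part of this header): the classes above hook
+ * into the scope-based cleanup helpers from <linux/cleanup.h>, so a caller can
+ * get automatic release on scope exit; guc and ndwords below are assumed to
+ * exist at the call site:
+ *
+ *	CLASS(xe_guc_buf, buf)(&guc->buf, ndwords);
+ *
+ *	if (!xe_guc_buf_is_valid(buf))
+ *		return -ENOBUFS;
+ *
+ * and the buffer is released automatically when buf goes out of scope,
+ * including on the early return above.
+ */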
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_buf_types.h b/drivers/gpu/drm/xe/xe_guc_buf_types.h
new file mode 100644
index 000000000000..9e123d71c064
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_buf_types.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_BUF_TYPES_H_
+#define _XE_GUC_BUF_TYPES_H_
+
+struct drm_suballoc;
+struct xe_sa_manager;
+
+/**
+ * struct xe_guc_buf_cache - GuC Data Buffer Cache.
+ */
+struct xe_guc_buf_cache {
+ /* private: internal sub-allocation manager */
+ struct xe_sa_manager *sam;
+};
+
+/**
+ * struct xe_guc_buf - GuC Data Buffer Reference.
+ */
+struct xe_guc_buf {
+ /* private: internal sub-allocation reference */
+ struct drm_suballoc *sa;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
new file mode 100644
index 000000000000..859a3ba91be5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -0,0 +1,2011 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021-2024 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "abi/guc_actions_abi.h"
+#include "abi/guc_capture_abi.h"
+#include "abi/guc_log_abi.h"
+#include "regs/xe_engine_regs.h"
+#include "regs/xe_gt_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_regs.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gt.h"
+#include "xe_gt_mcr.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_ads.h"
+#include "xe_guc_capture.h"
+#include "xe_guc_capture_types.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_log.h"
+#include "xe_guc_submit_types.h"
+#include "xe_guc_submit.h"
+#include "xe_hw_engine_types.h"
+#include "xe_hw_engine.h"
+#include "xe_lrc.h"
+#include "xe_macros.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_sched_job.h"
+
+/*
+ * struct __guc_capture_bufstate
+ *
+ * Book-keeping structure used to track read and write pointers
+ * as we extract error capture data from the GuC-log-buffer's
+ * error-capture region as a stream of dwords.
+ */
+struct __guc_capture_bufstate {
+ u32 size;
+ u32 data_offset;
+ u32 rd;
+ u32 wr;
+};
+
+/*
+ * struct __guc_capture_parsed_output - extracted error capture node
+ *
+ * A single unit of extracted error-capture output data grouped together
+ * at an engine-instance level. We keep these nodes in a linked list.
+ * See cachelist and outlist below.
+ */
+struct __guc_capture_parsed_output {
+ /*
+ * A single set of 3 capture lists: a global list,
+ * an engine-class list and an engine-instance list.
+ * outlist in struct xe_guc_state_capture will keep
+ * a linked list of these nodes that will eventually
+ * be detached from outlist and attached to the
+ * xe_devcoredump in response to a context reset.
+ */
+ struct list_head link;
+ bool is_partial;
+ u32 eng_class;
+ u32 eng_inst;
+ u32 guc_id;
+ u32 lrca;
+ u32 type;
+ bool locked;
+ enum xe_hw_engine_snapshot_source_id source;
+ struct gcap_reg_list_info {
+ u32 vfid;
+ u32 num_regs;
+ struct guc_mmio_reg *regs;
+ } reginfo[GUC_STATE_CAPTURE_TYPE_MAX];
+#define GCAP_PARSED_REGLIST_INDEX_GLOBAL BIT(GUC_STATE_CAPTURE_TYPE_GLOBAL)
+#define GCAP_PARSED_REGLIST_INDEX_ENGCLASS BIT(GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS)
+};
+
+/*
+ * Define all device tables of GuC error capture register lists
+ * NOTE:
+ * For engine-registers, GuC only needs the register offsets
+ * from the engine-mmio-base
+ *
+ * 64 bit registers need 2 entries, one for the low 32 bits and one for the
+ * high 32 bits, for example:
+ * Register data_type flags mask Register name
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"},
+ * 1. data_type: indicates whether the entry is the high or the low 32 bits of
+ *    a 64 bit register. A 64 bit register definition requires 2 consecutive
+ *    entries, with the low dword first and the high dword second.
+ * 2. Register name: NULL for the incomplete (low dword) entry.
+ * 3. An incorrect order will trigger XE_WARN.
+ */
+#define COMMON_XELP_BASE_GLOBAL \
+ { FORCEWAKE_GT, REG_32BIT, 0, 0, 0, "FORCEWAKE_GT"}
+
+#define COMMON_BASE_ENGINE_INSTANCE \
+ { RING_HWSTAM(0), REG_32BIT, 0, 0, 0, "HWSTAM"}, \
+ { RING_HWS_PGA(0), REG_32BIT, 0, 0, 0, "RING_HWS_PGA"}, \
+ { RING_HEAD(0), REG_32BIT, 0, 0, 0, "RING_HEAD"}, \
+ { RING_TAIL(0), REG_32BIT, 0, 0, 0, "RING_TAIL"}, \
+ { RING_CTL(0), REG_32BIT, 0, 0, 0, "RING_CTL"}, \
+ { RING_MI_MODE(0), REG_32BIT, 0, 0, 0, "RING_MI_MODE"}, \
+ { RING_MODE(0), REG_32BIT, 0, 0, 0, "RING_MODE"}, \
+ { RING_ESR(0), REG_32BIT, 0, 0, 0, "RING_ESR"}, \
+ { RING_EMR(0), REG_32BIT, 0, 0, 0, "RING_EMR"}, \
+ { RING_EIR(0), REG_32BIT, 0, 0, 0, "RING_EIR"}, \
+ { RING_IMR(0), REG_32BIT, 0, 0, 0, "RING_IMR"}, \
+ { RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \
+ { RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \
+ { INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \
+ { RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_BBADDR_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_BBADDR"}, \
+ { RING_START(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_START_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_START"}, \
+ { RING_DMA_FADD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_DMA_FADD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_DMA_FADD"}, \
+ { RING_EXECLIST_STATUS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_STATUS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_STATUS"}, \
+ { RING_EXECLIST_SQ_CONTENTS_LO(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
+ { RING_EXECLIST_SQ_CONTENTS_HI(0), REG_64BIT_HI_DW, 0, 0, 0, "RING_EXECLIST_SQ_CONTENTS"}
+
+#define COMMON_XELP_RC_CLASS \
+ { RCU_MODE, REG_32BIT, 0, 0, 0, "RCU_MODE"}
+
+#define COMMON_XELP_RC_CLASS_INSTDONE \
+ { SC_INSTDONE, REG_32BIT, 0, 0, 0, "SC_INSTDONE"}, \
+ { SC_INSTDONE_EXTRA, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA"}, \
+ { SC_INSTDONE_EXTRA2, REG_32BIT, 0, 0, 0, "SC_INSTDONE_EXTRA2"}
+
+#define XELP_VEC_CLASS_REGS \
+ { SFC_DONE(0), 0, 0, 0, 0, "SFC_DONE[0]"}, \
+ { SFC_DONE(1), 0, 0, 0, 0, "SFC_DONE[1]"}, \
+ { SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \
+ { SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"}
+
+/* XE_LP Global */
+static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
+ COMMON_XELP_BASE_GLOBAL,
+};
+
+/* Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_rc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Render / Compute Engine-Class */
+static const struct __guc_mmio_reg_descr xe_rc_class_regs[] = {
+ COMMON_XELP_RC_CLASS,
+ COMMON_XELP_RC_CLASS_INSTDONE,
+};
+
+/* Render / Compute Engine-Class for xehpg */
+static const struct __guc_mmio_reg_descr xe_hpg_rc_class_regs[] = {
+ COMMON_XELP_RC_CLASS,
+};
+
+/* Media Decode/Encode Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_vd_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Video Enhancement Engine-Class */
+static const struct __guc_mmio_reg_descr xe_vec_class_regs[] = {
+ XELP_VEC_CLASS_REGS,
+};
+
+/* Video Enhancement Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_vec_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* Blitter Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_blt_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/* XE_LP - GSC Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+};
+
+/*
+ * Empty list to prevent warnings about unknown class/instance types
+ * as not all class/instance types have entries on all platforms.
+ */
+static const struct __guc_mmio_reg_descr empty_regs_list[] = {
+};
+
+#define TO_GCAP_DEF_OWNER(x) (GUC_CAPTURE_LIST_INDEX_##x)
+#define TO_GCAP_DEF_TYPE(x) (GUC_STATE_CAPTURE_TYPE_##x)
+#define MAKE_REGLIST(regslist, regsowner, regstype, class) \
+ { \
+ regslist, \
+ ARRAY_SIZE(regslist), \
+ TO_GCAP_DEF_OWNER(regsowner), \
+ TO_GCAP_DEF_TYPE(regstype), \
+ class \
+ }
+
+/* List of lists for legacy graphics product version < 1255 */
+static const struct __guc_mmio_reg_descr_group xe_lp_lists[] = {
+ MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ {}
+};
+
+/* List of lists for graphics product version >= 1255 */
+static const struct __guc_mmio_reg_descr_group xe_hpg_lists[] = {
+ MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_hpg_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(xe_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ {}
+};
+
+static const char * const capture_list_type_names[] = {
+ "Global",
+ "Class",
+ "Instance",
+};
+
+static const char * const capture_engine_class_names[] = {
+ "Render/Compute",
+ "Video",
+ "VideoEnhance",
+ "Blitter",
+ "GSC-Other",
+};
+
+struct __guc_capture_ads_cache {
+ bool is_valid;
+ void *ptr;
+ size_t size;
+ int status;
+};
+
+struct xe_guc_state_capture {
+ const struct __guc_mmio_reg_descr_group *reglists;
+ /**
+ * NOTE: steered registers have multiple instances depending on the HW configuration
+ * (slices or dual-sub-slices) and thus depend on the HW fuses discovered
+ */
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_capture_ads_cache ads_cache[GUC_CAPTURE_LIST_INDEX_MAX]
+ [GUC_STATE_CAPTURE_TYPE_MAX]
+ [GUC_CAPTURE_LIST_CLASS_MAX];
+ void *ads_null_cache;
+ struct list_head cachelist;
+#define PREALLOC_NODES_MAX_COUNT (3 * GUC_MAX_ENGINE_CLASSES * GUC_MAX_INSTANCES_PER_CLASS)
+#define PREALLOC_NODES_DEFAULT_NUMREGS 64
+
+ int max_mmio_per_node;
+ struct list_head outlist;
+};
+
+static void
+guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node);
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_device_reglist(struct xe_device *xe)
+{
+ if (GRAPHICS_VERx100(xe) >= 1255)
+ return xe_hpg_lists;
+ else
+ return xe_lp_lists;
+}
+
+static const struct __guc_mmio_reg_descr_group *
+guc_capture_get_one_list(const struct __guc_mmio_reg_descr_group *reglists,
+ u32 owner, u32 type, enum guc_capture_list_class_type capture_class)
+{
+ int i;
+
+ if (!reglists)
+ return NULL;
+
+ for (i = 0; reglists[i].list; ++i) {
+ if (reglists[i].owner == owner && reglists[i].type == type &&
+ (reglists[i].engine == capture_class ||
+ reglists[i].type == GUC_STATE_CAPTURE_TYPE_GLOBAL))
+ return &reglists[i];
+ }
+
+ return NULL;
+}
+
+const struct __guc_mmio_reg_descr_group *
+xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, bool is_ext)
+{
+ const struct __guc_mmio_reg_descr_group *reglists;
+
+ if (is_ext) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ reglists = guc->capture->extlists;
+ } else {
+ reglists = guc_capture_get_device_reglist(gt_to_xe(gt));
+ }
+ return guc_capture_get_one_list(reglists, owner, type, capture_class);
+}
+
+struct __ext_steer_reg {
+ const char *name;
+ struct xe_reg_mcr reg;
+};
+
+static const struct __ext_steer_reg xe_extregs[] = {
+ {"SAMPLER_INSTDONE", SAMPLER_INSTDONE},
+ {"ROW_INSTDONE", ROW_INSTDONE}
+};
+
+static const struct __ext_steer_reg xehpg_extregs[] = {
+ {"SC_INSTDONE", XEHPG_SC_INSTDONE},
+ {"SC_INSTDONE_EXTRA", XEHPG_SC_INSTDONE_EXTRA},
+ {"SC_INSTDONE_EXTRA2", XEHPG_SC_INSTDONE_EXTRA2},
+ {"INSTDONE_GEOM_SVGUNIT", XEHPG_INSTDONE_GEOM_SVGUNIT}
+};
+
+static void __fill_ext_reg(struct __guc_mmio_reg_descr *ext,
+ const struct __ext_steer_reg *extlist,
+ u32 dss_id, u16 slice_id, u16 subslice_id)
+{
+ if (!ext || !extlist)
+ return;
+
+ ext->reg = XE_REG(extlist->reg.__reg.addr);
+ ext->flags = FIELD_PREP(GUC_REGSET_STEERING_NEEDED, 1);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_GROUP, slice_id);
+ ext->flags |= FIELD_PREP(GUC_REGSET_STEERING_INSTANCE, subslice_id);
+ ext->dss_id = dss_id;
+ ext->regname = extlist->name;
+}
+
+static int
+__alloc_ext_regs(struct drm_device *drm, struct __guc_mmio_reg_descr_group *newlist,
+ const struct __guc_mmio_reg_descr_group *rootlist, int num_regs)
+{
+ struct __guc_mmio_reg_descr *list;
+
+ list = drmm_kzalloc(drm, num_regs * sizeof(struct __guc_mmio_reg_descr), GFP_KERNEL);
+ if (!list)
+ return -ENOMEM;
+
+ newlist->list = list;
+ newlist->num_regs = num_regs;
+ newlist->owner = rootlist->owner;
+ newlist->engine = rootlist->engine;
+ newlist->type = rootlist->type;
+
+ return 0;
+}
+
+static int guc_capture_get_steer_reg_num(struct xe_device *xe)
+{
+ int num = ARRAY_SIZE(xe_extregs);
+
+ if (GRAPHICS_VERx100(xe) >= 1255)
+ num += ARRAY_SIZE(xehpg_extregs);
+
+ return num;
+}
+
+static void guc_capture_alloc_steered_lists(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u16 slice, subslice;
+ int dss, i, total = 0;
+ const struct __guc_mmio_reg_descr_group *lists = guc->capture->reglists;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_mmio_reg_descr_group *extlists;
+ struct __guc_mmio_reg_descr *extarray;
+ bool has_xehpg_extregs = GRAPHICS_VERx100(gt_to_xe(gt)) >= 1255;
+ struct drm_device *drm = &gt_to_xe(gt)->drm;
+ bool has_rcs_ccs = false;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ /*
+ * If the GT has no RCS/CCS, there is no need to allocate a steered list.
+ * Currently, only RCS/CCS have steered registers; if other engine types
+ * gain steered registers in the future, this condition check needs to
+ * be extended.
+ */
+ for_each_hw_engine(hwe, gt, id) {
+ if (xe_engine_class_to_guc_capture_class(hwe->class) ==
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+ has_rcs_ccs = true;
+ break;
+ }
+ }
+
+ if (!has_rcs_ccs)
+ return;
+
+ /* steered registers currently only exist for the render-class */
+ list = guc_capture_get_one_list(lists, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE);
+ /*
+ * Skip if this platform has no engine class registers or if extlists
+ * was previously allocated
+ */
+ if (!list || guc->capture->extlists)
+ return;
+
+ total = bitmap_weight(gt->fuse_topo.g_dss_mask, sizeof(gt->fuse_topo.g_dss_mask) * 8) *
+ guc_capture_get_steer_reg_num(guc_to_xe(guc));
+
+ if (!total)
+ return;
+
+ /* allocate an extra for an end marker */
+ extlists = drmm_kzalloc(drm, 2 * sizeof(struct __guc_mmio_reg_descr_group), GFP_KERNEL);
+ if (!extlists)
+ return;
+
+ if (__alloc_ext_regs(drm, &extlists[0], list, total)) {
+ drmm_kfree(drm, extlists);
+ return;
+ }
+
+ /* For steering registers, the list is generated at run-time */
+ extarray = (struct __guc_mmio_reg_descr *)extlists[0].list;
+ for_each_dss_steering(dss, gt, slice, subslice) {
+ for (i = 0; i < ARRAY_SIZE(xe_extregs); ++i) {
+ __fill_ext_reg(extarray, &xe_extregs[i], dss, slice, subslice);
+ ++extarray;
+ }
+
+ if (has_xehpg_extregs)
+ for (i = 0; i < ARRAY_SIZE(xehpg_extregs); ++i) {
+ __fill_ext_reg(extarray, &xehpg_extregs[i], dss, slice, subslice);
+ ++extarray;
+ }
+ }
+
+ extlists[0].num_regs = total;
+
+ xe_gt_dbg(guc_to_gt(guc), "capture found %d ext-regs.\n", total);
+ guc->capture->extlists = extlists;
+}
+
+static int
+guc_capture_list_init(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, struct guc_mmio_reg *ptr,
+ u16 num_entries)
+{
+ u32 ptr_idx = 0, list_idx = 0;
+ const struct __guc_mmio_reg_descr_group *reglists = guc->capture->reglists;
+ struct __guc_mmio_reg_descr_group *extlists = guc->capture->extlists;
+ const struct __guc_mmio_reg_descr_group *match;
+ u32 list_num;
+
+ if (!reglists)
+ return -ENODEV;
+
+ match = guc_capture_get_one_list(reglists, owner, type, capture_class);
+ if (!match)
+ return -ENODATA;
+
+ list_num = match->num_regs;
+ for (list_idx = 0; ptr_idx < num_entries && list_idx < list_num; ++list_idx, ++ptr_idx) {
+ ptr[ptr_idx].offset = match->list[list_idx].reg.addr;
+ ptr[ptr_idx].value = 0xDEADF00D;
+ ptr[ptr_idx].flags = match->list[list_idx].flags;
+ ptr[ptr_idx].mask = match->list[list_idx].mask;
+ }
+
+ match = guc_capture_get_one_list(extlists, owner, type, capture_class);
+ if (match)
+ for (ptr_idx = list_num, list_idx = 0;
+ ptr_idx < num_entries && list_idx < match->num_regs;
+ ++ptr_idx, ++list_idx) {
+ ptr[ptr_idx].offset = match->list[list_idx].reg.addr;
+ ptr[ptr_idx].value = 0xDEADF00D;
+ ptr[ptr_idx].flags = match->list[list_idx].flags;
+ ptr[ptr_idx].mask = match->list[list_idx].mask;
+ }
+
+ if (ptr_idx < num_entries)
+ xe_gt_dbg(guc_to_gt(guc), "Got short capture reglist init: %d out-of %d.\n",
+ ptr_idx, num_entries);
+
+ return 0;
+}
+
+static int
+guc_cap_list_num_regs(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class)
+{
+ const struct __guc_mmio_reg_descr_group *match;
+ int num_regs = 0;
+
+ match = guc_capture_get_one_list(guc->capture->reglists, owner, type, capture_class);
+ if (match)
+ num_regs = match->num_regs;
+
+ match = guc_capture_get_one_list(guc->capture->extlists, owner, type, capture_class);
+ if (match)
+ num_regs += match->num_regs;
+ else
+ /*
+ * If a caller wants the full register dump size but we have
+ * not yet got the hw-config, which is before max_mmio_per_node
+ * is initialized, then provide a worst-case number for
+ * extlists based on max dss fuse bits, but only ever for
+ * render/compute
+ */
+ if (owner == GUC_CAPTURE_LIST_INDEX_PF &&
+ type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+ capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE &&
+ !guc->capture->max_mmio_per_node)
+ num_regs += guc_capture_get_steer_reg_num(guc_to_xe(guc)) *
+ XE_MAX_DSS_FUSE_BITS;
+
+ return num_regs;
+}
+
+static int
+guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class,
+ size_t *size, bool is_purpose_est)
+{
+ struct xe_guc_state_capture *gc = guc->capture;
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct __guc_capture_ads_cache *cache;
+ int num_regs;
+
+ xe_gt_assert(gt, type < GUC_STATE_CAPTURE_TYPE_MAX);
+ xe_gt_assert(gt, capture_class < GUC_CAPTURE_LIST_CLASS_MAX);
+
+ cache = &gc->ads_cache[owner][type][capture_class];
+ if (!gc->reglists) {
+ xe_gt_warn(gt, "No capture reglist for this device\n");
+ return -ENODEV;
+ }
+
+ if (cache->is_valid) {
+ *size = cache->size;
+ return cache->status;
+ }
+
+ if (!is_purpose_est && owner == GUC_CAPTURE_LIST_INDEX_PF &&
+ !guc_capture_get_one_list(gc->reglists, owner, type, capture_class)) {
+ if (type == GUC_STATE_CAPTURE_TYPE_GLOBAL)
+ xe_gt_warn(gt, "Missing capture reglist: global!\n");
+ else
+ xe_gt_warn(gt, "Missing capture reglist: %s(%u):%s(%u)!\n",
+ capture_list_type_names[type], type,
+ capture_engine_class_names[capture_class], capture_class);
+ return -ENODEV;
+ }
+
+ num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class);
+ /* intentional empty lists can exist depending on hw config */
+ if (!num_regs)
+ return -ENODATA;
+
+ if (size)
+ *size = PAGE_ALIGN((sizeof(struct guc_debug_capture_list)) +
+ (num_regs * sizeof(struct guc_mmio_reg)));
+
+ return 0;
+}
+
+/**
+ * xe_guc_capture_getlistsize - Get list size for owner/type/class combination
+ * @guc: The GuC object
+ * @owner: PF/VF owner
+ * @type: GuC capture register type
+ * @capture_class: GuC capture engine class id
+ * @size: Pointer to the size
+ *
+ * This function will get the list for the owner/type/class combination, and
+ * return the page aligned list size.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, size_t *size)
+{
+ return guc_capture_getlistsize(guc, owner, type, capture_class, size, false);
+}
+
+/**
+ * xe_guc_capture_getlist - Get register capture list for owner/type/class
+ * combination
+ * @guc: The GuC object
+ * @owner: PF/VF owner
+ * @type: GuC capture register type
+ * @capture_class: GuC capture engine class id
+ * @outptr: Pointer to the cached register capture list
+ *
+ * This function will get the register capture list for the owner/type/class
+ * combination.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, void **outptr)
+{
+ struct xe_guc_state_capture *gc = guc->capture;
+ struct __guc_capture_ads_cache *cache = &gc->ads_cache[owner][type][capture_class];
+ struct guc_debug_capture_list *listnode;
+ int ret, num_regs;
+ u8 *caplist, *tmp;
+ size_t size = 0;
+
+ if (!gc->reglists)
+ return -ENODEV;
+
+ if (cache->is_valid) {
+ *outptr = cache->ptr;
+ return cache->status;
+ }
+
+ ret = xe_guc_capture_getlistsize(guc, owner, type, capture_class, &size);
+ if (ret) {
+ cache->is_valid = true;
+ cache->ptr = NULL;
+ cache->size = 0;
+ cache->status = ret;
+ return ret;
+ }
+
+ caplist = drmm_kzalloc(guc_to_drm(guc), size, GFP_KERNEL);
+ if (!caplist)
+ return -ENOMEM;
+
+ /* populate capture list header */
+ tmp = caplist;
+ num_regs = guc_cap_list_num_regs(guc, owner, type, capture_class);
+ listnode = (struct guc_debug_capture_list *)tmp;
+ listnode->header.info = FIELD_PREP(GUC_CAPTURELISTHDR_NUMDESCR, (u32)num_regs);
+
+ /* populate list of register descriptors */
+ tmp += sizeof(struct guc_debug_capture_list);
+ guc_capture_list_init(guc, owner, type, capture_class,
+ (struct guc_mmio_reg *)tmp, num_regs);
+
+ /* cache this list */
+ cache->is_valid = true;
+ cache->ptr = caplist;
+ cache->size = size;
+ cache->status = 0;
+
+ *outptr = caplist;
+
+ return 0;
+}
+
+/**
+ * xe_guc_capture_getnullheader - Get a null list for register capture
+ * @guc: The GuC object
+ * @outptr: Pointer to the cached register capture list
+ * @size: Pointer to the size
+ *
+ * This function will allocate a null list for register capture.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+int
+xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size)
+{
+ struct xe_guc_state_capture *gc = guc->capture;
+ int tmp = sizeof(u32) * 4;
+ void *null_header;
+
+ if (gc->ads_null_cache) {
+ *outptr = gc->ads_null_cache;
+ *size = tmp;
+ return 0;
+ }
+
+ null_header = drmm_kzalloc(guc_to_drm(guc), tmp, GFP_KERNEL);
+ if (!null_header)
+ return -ENOMEM;
+
+ gc->ads_null_cache = null_header;
+ *outptr = null_header;
+ *size = tmp;
+
+ return 0;
+}
+
+/**
+ * xe_guc_capture_ads_input_worst_size - Calculate the worst-case size for GuC register capture
+ * @guc: pointer to the xe_guc structure
+ *
+ * Calculate the worst-case size for GuC register capture by including all possible engine classes.
+ *
+ * Returns: Calculated size
+ */
+size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc)
+{
+ size_t total_size, class_size, instance_size, global_size;
+ int i, j;
+
+ /*
+ * This function calculates the worst case register lists size by
+ * including all possible engines classes. It is called during the
+ * first of a two-phase GuC (and ADS-population) initialization
+ * sequence, that is, during the pre-hwconfig phase before we have
+ * the exact engine fusing info.
+ */
+ total_size = PAGE_SIZE; /* Pad a page in front for empty lists */
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; i++) {
+ for (j = 0; j < GUC_CAPTURE_LIST_CLASS_MAX; j++) {
+ if (xe_guc_capture_getlistsize(guc, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ j, &class_size) < 0)
+ class_size = 0;
+ if (xe_guc_capture_getlistsize(guc, i,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+ j, &instance_size) < 0)
+ instance_size = 0;
+ total_size += class_size + instance_size;
+ }
+ if (xe_guc_capture_getlistsize(guc, i,
+ GUC_STATE_CAPTURE_TYPE_GLOBAL,
+ 0, &global_size) < 0)
+ global_size = 0;
+ total_size += global_size;
+ }
+
+ return PAGE_ALIGN(total_size);
+}
+
+static int guc_capture_output_size_est(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ int capture_size = 0;
+ size_t tmp = 0;
+
+ if (!guc->capture)
+ return -ENODEV;
+
+ /*
+ * If every single engine instance suffered a failure in quick succession, but
+ * the failures were all unrelated, then a burst of multiple error-capture
+ * events would dump registers for every engine instance, one at a time. In
+ * this case, GuC would even dump the global registers repeatedly.
+ *
+ * For each engine instance, there would be 1 x guc_state_capture_group_t output
+ * followed by 3 x guc_state_capture_t lists. The latter is how the register
+ * dumps are split across different register types (where the '3' are global vs class
+ * vs instance).
+ */
+ for_each_hw_engine(hwe, gt, id) {
+ enum guc_capture_list_class_type capture_class;
+
+ capture_class = xe_engine_class_to_guc_capture_class(hwe->class);
+ capture_size += sizeof(struct guc_state_capture_group_header_t) +
+ (3 * sizeof(struct guc_state_capture_header_t));
+
+ if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_GLOBAL,
+ 0, &tmp, true))
+ capture_size += tmp;
+ if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ capture_class, &tmp, true))
+ capture_size += tmp;
+ if (!guc_capture_getlistsize(guc, 0, GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+ capture_class, &tmp, true))
+ capture_size += tmp;
+ }
+
+ return capture_size;
+}
+
+/*
+ * Add on a 3x multiplier to allow for multiple back-to-back captures occurring
+ * before the Xe driver can read the data out and process it
+ */
+#define GUC_CAPTURE_OVERBUFFER_MULTIPLIER 3
+
+static void check_guc_capture_size(struct xe_guc *guc)
+{
+ int capture_size = guc_capture_output_size_est(guc);
+ int spare_size = capture_size * GUC_CAPTURE_OVERBUFFER_MULTIPLIER;
+ u32 buffer_size = xe_guc_log_section_size_capture(&guc->log);
+
+ /*
+ * NOTE: capture_size is much smaller than the capture region
+ * allocation (DG2: <80K vs 1MB).
+ * Additionally, it's based on the space needed to fit all engines getting
+ * reset at once within the same G2H handler task slot. This is very
+ * unlikely. However, if GuC really does run out of space for whatever
+ * reason, we will see a separate warning message when processing the
+ * G2H event capture-notification, search for:
+ * xe_guc_STATE_CAPTURE_EVENT_STATUS_NOSPACE.
+ */
+ if (capture_size < 0)
+ xe_gt_dbg(guc_to_gt(guc),
+ "Failed to calculate error state capture buffer minimum size: %d!\n",
+ capture_size);
+ if (capture_size > buffer_size)
+ xe_gt_dbg(guc_to_gt(guc), "Error state capture buffer maybe small: %d < %d\n",
+ buffer_size, capture_size);
+ else if (spare_size > buffer_size)
+ xe_gt_dbg(guc_to_gt(guc),
+ "Error state capture buffer lacks spare size: %d < %d (min = %d)\n",
+ buffer_size, spare_size, capture_size);
+}
+
+static void
+guc_capture_add_node_to_list(struct __guc_capture_parsed_output *node,
+ struct list_head *list)
+{
+ list_add(&node->link, list);
+}
+
+static void
+guc_capture_add_node_to_outlist(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_remove_stale_matches_from_list(gc, node);
+ guc_capture_add_node_to_list(node, &gc->outlist);
+}
+
+static void
+guc_capture_add_node_to_cachelist(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ guc_capture_add_node_to_list(node, &gc->cachelist);
+}
+
+static void
+guc_capture_free_outlist_node(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *n)
+{
+ if (n) {
+ n->locked = 0;
+ list_del(&n->link);
+ /* put node back to cache list */
+ guc_capture_add_node_to_cachelist(gc, n);
+ }
+}
+
+static void
+guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc,
+ struct __guc_capture_parsed_output *node)
+{
+ struct __guc_capture_parsed_output *n, *ntmp;
+ int guc_id = node->guc_id;
+
+ list_for_each_entry_safe(n, ntmp, &gc->outlist, link) {
+ if (n != node && !n->locked && n->guc_id == guc_id)
+ guc_capture_free_outlist_node(gc, n);
+ }
+}
+
+static void
+guc_capture_init_node(struct xe_guc *guc, struct __guc_capture_parsed_output *node)
+{
+ struct guc_mmio_reg *tmp[GUC_STATE_CAPTURE_TYPE_MAX];
+ int i;
+
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ tmp[i] = node->reginfo[i].regs;
+ memset(tmp[i], 0, sizeof(struct guc_mmio_reg) *
+ guc->capture->max_mmio_per_node);
+ }
+ memset(node, 0, sizeof(*node));
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i)
+ node->reginfo[i].regs = tmp[i];
+
+ INIT_LIST_HEAD(&node->link);
+}
+
+/**
+ * DOC: Init, G2H-event and reporting flows for GuC-error-capture
+ *
+ * KMD Init time flows:
+ * --------------------
+ * --> alloc A: GuC input capture regs lists (registered to GuC via ADS).
+ * xe_guc_ads acquires the register lists by calling
+ * xe_guc_capture_getlistsize and xe_guc_capture_getlist 'n' times,
+ * where n = 1 for global-reg-list +
+ * num_engine_classes for class-reg-list +
+ * num_engine_classes for instance-reg-list
+ * (since all instances of the same engine-class type
+ * have an identical engine-instance register-list).
+ * ADS module also calls separately for PF vs VF.
+ *
+ * --> alloc B: GuC output capture buf (registered via guc_init_params(log_param))
+ * Size = #define CAPTURE_BUFFER_SIZE (warns if too small)
+ * Note: 'x 3' to hold multiple capture groups
+ *
+ * GUC Runtime notify capture:
+ * --------------------------
+ * --> G2H STATE_CAPTURE_NOTIFICATION
+ * L--> xe_guc_capture_process
+ * L--> Loop through B (head..tail) and for each engine instance's
+ * err-state-captured register-list we find, we alloc 'C':
+ * --> alloc C: A capture-output-node structure that includes misc capture info along
+ * with 3 register list dumps (global, engine-class and engine-instance)
+ * This node is created from a pre-allocated list of blank nodes in
+ * guc->capture->cachelist and populated with the error-capture
+ * data from GuC and then it's added into guc->capture->outlist linked
+ * list. This list is used for matchup and printout by xe_devcoredump_read
+ * and xe_engine_snapshot_print, (when user invokes the devcoredump sysfs).
+ *
+ * GUC --> notify context reset:
+ * -----------------------------
+ * --> guc_exec_queue_timedout_job
+ * L--> xe_devcoredump
+ * L--> devcoredump_snapshot
+ * --> xe_hw_engine_snapshot_capture
+ * --> xe_engine_manual_capture(For manual capture)
+ *
+ * User Sysfs / Debugfs
+ * --------------------
+ * --> xe_devcoredump_read->
+ * L--> xxx_snapshot_print
+ * L--> xe_engine_snapshot_print
+ * Print the register list values saved at
+ * guc->capture->outlist
+ *
+ */
+
+static int guc_capture_buf_cnt(struct __guc_capture_bufstate *buf)
+{
+ if (buf->wr >= buf->rd)
+ return (buf->wr - buf->rd);
+ return (buf->size - buf->rd) + buf->wr;
+}
+
+static int guc_capture_buf_cnt_to_end(struct __guc_capture_bufstate *buf)
+{
+ if (buf->rd > buf->wr)
+ return (buf->size - buf->rd);
+ return (buf->wr - buf->rd);
+}
+
+/*
+ * GuC's error-capture output is a ring buffer populated in a byte-stream fashion:
+ *
+ * The GuC Log buffer region for error-capture is managed like a ring buffer.
+ * The GuC firmware dumps error capture logs into this ring in a byte-stream flow.
+ * Additionally, as per the current and foreseeable future, all packed error-
+ * capture output structures are dword aligned.
+ *
+ * That said, if the GuC firmware is in the midst of writing a structure that is larger
+ * than one dword but the tail end of the err-capture buffer-region has less space left,
+ * we would need to extract that structure one dword at a time straddled across the end,
+ * onto the start of the ring.
+ *
+ * The function below, guc_capture_log_remove_bytes(), is a helper for that. All callers of this
+ * function would typically do a straight-up memcpy from the ring contents and will only
+ * call this helper if their structure-extraction is straddling across the end of the
+ * ring. GuC firmware does not add any padding. The reason for the no-padding is to ease
+ * scalability for future expansion of output data types without requiring a redesign
+ * of the flow controls.
+ */
+static int
+guc_capture_log_remove_bytes(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ void *out, int bytes_needed)
+{
+#define GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX 3
+
+ int fill_size = 0, tries = GUC_CAPTURE_LOG_BUF_COPY_RETRY_MAX;
+ int copy_size, avail;
+
+ xe_assert(guc_to_xe(guc), bytes_needed % sizeof(u32) == 0);
+
+ if (bytes_needed > guc_capture_buf_cnt(buf))
+ return -1;
+
+ while (bytes_needed > 0 && tries--) {
+ int misaligned;
+
+ avail = guc_capture_buf_cnt_to_end(buf);
+ misaligned = avail % sizeof(u32);
+ /* wrap if at end */
+ if (!avail) {
+ /* output stream clipped */
+ if (!buf->rd)
+ return fill_size;
+ buf->rd = 0;
+ continue;
+ }
+
+ /* Only copy to u32 aligned data */
+ copy_size = avail < bytes_needed ? avail - misaligned : bytes_needed;
+ xe_map_memcpy_from(guc_to_xe(guc), out + fill_size, &guc->log.bo->vmap,
+ buf->data_offset + buf->rd, copy_size);
+ buf->rd += copy_size;
+ fill_size += copy_size;
+ bytes_needed -= copy_size;
+
+ if (misaligned)
+ xe_gt_warn(guc_to_gt(guc),
+ "Bytes extraction not dword aligned, clipping.\n");
+ }
+
+ return fill_size;
+}
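+
+/*
+ * Worked example with illustrative numbers: with size = 0x1000, rd = 0xFF8 and
+ * wr = 0x010, guc_capture_buf_cnt() reports 0x18 bytes pending while
+ * guc_capture_buf_cnt_to_end() reports only 0x8 bytes before the wrap, so a
+ * 16-byte structure is extracted as 8 bytes from the tail of the ring followed
+ * by 8 bytes from the start of the ring after rd wraps back to 0.
+ */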
+
+static int
+guc_capture_log_get_group_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_group_header_t *ghdr)
+{
+ int fullsize = sizeof(struct guc_state_capture_group_header_t);
+
+ if (guc_capture_log_remove_bytes(guc, buf, ghdr, fullsize) != fullsize)
+ return -1;
+ return 0;
+}
+
+static int
+guc_capture_log_get_data_hdr(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_state_capture_header_t *hdr)
+{
+ int fullsize = sizeof(struct guc_state_capture_header_t);
+
+ if (guc_capture_log_remove_bytes(guc, buf, hdr, fullsize) != fullsize)
+ return -1;
+ return 0;
+}
+
+static int
+guc_capture_log_get_register(struct xe_guc *guc, struct __guc_capture_bufstate *buf,
+ struct guc_mmio_reg *reg)
+{
+ int fullsize = sizeof(struct guc_mmio_reg);
+
+ if (guc_capture_log_remove_bytes(guc, buf, reg, fullsize) != fullsize)
+ return -1;
+ return 0;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_get_prealloc_node(struct xe_guc *guc)
+{
+ struct __guc_capture_parsed_output *found = NULL;
+
+ if (!list_empty(&guc->capture->cachelist)) {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /* get first avail node from the cache list */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->cachelist, link) {
+ found = n;
+ break;
+ }
+ } else {
+ struct __guc_capture_parsed_output *n, *ntmp;
+
+ /*
+ * traverse reversed and steal back the oldest node already
+ * allocated
+ */
+ list_for_each_entry_safe_reverse(n, ntmp, &guc->capture->outlist, link) {
+ if (!n->locked)
+ found = n;
+ }
+ }
+ if (found) {
+ list_del(&found->link);
+ guc_capture_init_node(guc, found);
+ }
+
+ return found;
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_clone_node(struct xe_guc *guc, struct __guc_capture_parsed_output *original,
+ u32 keep_reglist_mask)
+{
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = guc_capture_get_prealloc_node(guc);
+ if (!new)
+ return NULL;
+ if (!original)
+ return new;
+
+ new->is_partial = original->is_partial;
+
+ /* copy reg-lists that we want to clone */
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ if (keep_reglist_mask & BIT(i)) {
+ XE_WARN_ON(original->reginfo[i].num_regs >
+ guc->capture->max_mmio_per_node);
+
+ memcpy(new->reginfo[i].regs, original->reginfo[i].regs,
+ original->reginfo[i].num_regs * sizeof(struct guc_mmio_reg));
+
+ new->reginfo[i].num_regs = original->reginfo[i].num_regs;
+ new->reginfo[i].vfid = original->reginfo[i].vfid;
+
+ if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS) {
+ new->eng_class = original->eng_class;
+ } else if (i == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) {
+ new->eng_inst = original->eng_inst;
+ new->guc_id = original->guc_id;
+ new->lrca = original->lrca;
+ }
+ }
+ }
+
+ return new;
+}
+
+static int
+guc_capture_extract_reglists(struct xe_guc *guc, struct __guc_capture_bufstate *buf)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct guc_state_capture_group_header_t ghdr = {0};
+ struct guc_state_capture_header_t hdr = {0};
+ struct __guc_capture_parsed_output *node = NULL;
+ struct guc_mmio_reg *regs = NULL;
+ int i, numlists, numregs, ret = 0;
+ enum guc_state_capture_type datatype;
+ struct guc_mmio_reg tmp;
+ bool is_partial = false;
+
+ i = guc_capture_buf_cnt(buf);
+ if (!i)
+ return -ENODATA;
+
+ if (i % sizeof(u32)) {
+ xe_gt_warn(gt, "Got mis-aligned register capture entries\n");
+ ret = -EIO;
+ goto bailout;
+ }
+
+ /* first get the capture group header */
+ if (guc_capture_log_get_group_hdr(guc, buf, &ghdr)) {
+ ret = -EIO;
+ goto bailout;
+ }
+ /*
+ * we would typically expect a layout as below, where the number of captures
+ * is expected to be 3 or more (more than 3 if multiple dependent engine
+ * instances are being reset together).
+ * ____________________________________________
+ * | Capture Group |
+ * | ________________________________________ |
+ * | | Capture Group Header: | |
+ * | | - num_captures = 5 | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture1: | |
+ * | | Hdr: GLOBAL, numregs=a | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rega | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture2: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=b| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regb | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture3: | |
+ * | | Hdr: INSTANCE=RCS, numregs=c | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regc | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture4: | |
+ * | | Hdr: CLASS=RENDER/COMPUTE, numregs=d| |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... regd | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * | ________________________________________ |
+ * | | Capture5: | |
+ * | | Hdr: INSTANCE=CCS0, numregs=e | |
+ * | | ____________________________________ | |
+ * | | | Reglist | | |
+ * | | | - reg1, reg2, ... rege | | |
+ * | | |__________________________________| | |
+ * | |______________________________________| |
+ * |__________________________________________|
+ */
+ is_partial = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_CAPTURE_GROUP_TYPE, ghdr.info);
+ numlists = FIELD_GET(GUC_STATE_CAPTURE_GROUP_HEADER_NUM_CAPTURES, ghdr.info);
+
+ while (numlists--) {
+ if (guc_capture_log_get_data_hdr(guc, buf, &hdr)) {
+ ret = -EIO;
+ break;
+ }
+
+ datatype = FIELD_GET(GUC_STATE_CAPTURE_HEADER_CAPTURE_TYPE, hdr.info);
+ if (datatype > GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) {
+ /* unknown capture type - skip over to next capture set */
+ numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES,
+ hdr.num_mmio_entries);
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &tmp)) {
+ ret = -EIO;
+ break;
+ }
+ }
+ continue;
+ } else if (node) {
+ /*
+ * Based on the current capture type and what we have so far,
+ * decide if we should add the current node into the internal
+ * linked list for match-up when xe_devcoredump calls later
+ * (and alloc a blank node for the next set of reglists)
+ * or continue with the same node or clone the current node
+ * but only retain the global or class registers (such as the
+ * case of dependent engine resets).
+ */
+ if (datatype == GUC_STATE_CAPTURE_TYPE_GLOBAL) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+ node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS].num_regs) {
+ /* Add to list, clone node and duplicate global list */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ GCAP_PARSED_REGLIST_INDEX_GLOBAL);
+ } else if (datatype == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE &&
+ node->reginfo[GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE].num_regs) {
+ /* Add to list, clone node and duplicate global + class lists */
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = guc_capture_clone_node(guc, node,
+ (GCAP_PARSED_REGLIST_INDEX_GLOBAL |
+ GCAP_PARSED_REGLIST_INDEX_ENGCLASS));
+ }
+ }
+
+ if (!node) {
+ node = guc_capture_get_prealloc_node(guc);
+ if (!node) {
+ ret = -ENOMEM;
+ break;
+ }
+ if (datatype != GUC_STATE_CAPTURE_TYPE_GLOBAL)
+ xe_gt_dbg(gt, "Register capture missing global dump: %08x!\n",
+ datatype);
+ }
+ node->is_partial = is_partial;
+ node->reginfo[datatype].vfid = FIELD_GET(GUC_STATE_CAPTURE_HEADER_VFID, hdr.owner);
+ node->source = XE_ENGINE_CAPTURE_SOURCE_GUC;
+ node->type = datatype;
+
+ switch (datatype) {
+ case GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE:
+ node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS,
+ hdr.info);
+ node->eng_inst = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_INSTANCE,
+ hdr.info);
+ node->lrca = hdr.lrca;
+ node->guc_id = hdr.guc_id;
+ break;
+ case GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS:
+ node->eng_class = FIELD_GET(GUC_STATE_CAPTURE_HEADER_ENGINE_CLASS,
+ hdr.info);
+ break;
+ default:
+ break;
+ }
+
+ numregs = FIELD_GET(GUC_STATE_CAPTURE_HEADER_NUM_MMIO_ENTRIES,
+ hdr.num_mmio_entries);
+ if (numregs > guc->capture->max_mmio_per_node) {
+ xe_gt_dbg(gt, "Register capture list extraction clipped by prealloc!\n");
+ numregs = guc->capture->max_mmio_per_node;
+ }
+ node->reginfo[datatype].num_regs = numregs;
+ regs = node->reginfo[datatype].regs;
+ i = 0;
+ while (numregs--) {
+ if (guc_capture_log_get_register(guc, buf, &regs[i++])) {
+ ret = -EIO;
+ break;
+ }
+ }
+ }
+
+bailout:
+ if (node) {
+ /* If we have data, add to linked list for match-up when xe_devcoredump calls */
+ for (i = GUC_STATE_CAPTURE_TYPE_GLOBAL; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ if (node->reginfo[i].regs) {
+ guc_capture_add_node_to_outlist(guc->capture, node);
+ node = NULL;
+ break;
+ }
+ }
+ if (node) /* else return it back to cache list */
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+ return ret;
+}
+
+static int __guc_capture_flushlog_complete(struct xe_guc *guc)
+{
+ u32 action[] = {
+ XE_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE,
+ GUC_LOG_BUFFER_CAPTURE
+ };
+
+ return xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void __guc_capture_process_output(struct xe_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, full_count;
+ struct xe_uc *uc = container_of(guc, typeof(*uc), guc);
+ struct guc_log_buffer_state log_buf_state_local;
+ struct __guc_capture_bufstate buf;
+ bool new_overflow;
+ int ret, tmp;
+ u32 log_buf_state_offset;
+ u32 src_data_offset;
+
+ log_buf_state_offset = sizeof(struct guc_log_buffer_state) * GUC_LOG_BUFFER_CAPTURE;
+ src_data_offset = xe_guc_get_log_buffer_offset(&guc->log, GUC_LOG_BUFFER_CAPTURE);
+
+ /*
+ * Make a copy of the state structure from inside the GuC log buffer
+ * (which is mapped uncached) onto the stack, to avoid reading
+ * from it multiple times.
+ */
+ xe_map_memcpy_from(guc_to_xe(guc), &log_buf_state_local, &guc->log.bo->vmap,
+ log_buf_state_offset, sizeof(struct guc_log_buffer_state));
+
+ buffer_size = xe_guc_get_log_buffer_size(&guc->log, GUC_LOG_BUFFER_CAPTURE);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_count = FIELD_GET(GUC_LOG_BUFFER_STATE_BUFFER_FULL_CNT, log_buf_state_local.flags);
+
+ /* Bookkeeping stuff */
+ tmp = FIELD_GET(GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE, log_buf_state_local.flags);
+ guc->log.stats[GUC_LOG_BUFFER_CAPTURE].flush += tmp;
+ new_overflow = xe_guc_check_log_buf_overflow(&guc->log, GUC_LOG_BUFFER_CAPTURE,
+ full_count);
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ xe_gt_err(guc_to_gt(guc),
+ "Register capture buffer in invalid state: read = 0x%X, size = 0x%X!\n",
+ read_offset, buffer_size);
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ buf.size = buffer_size;
+ buf.rd = read_offset;
+ buf.wr = write_offset;
+ buf.data_offset = src_data_offset;
+
+ if (!xe_guc_read_stopped(guc)) {
+ do {
+ ret = guc_capture_extract_reglists(guc, &buf);
+ if (ret && ret != -ENODATA)
+ xe_gt_dbg(guc_to_gt(guc), "Capture extraction failed:%d\n", ret);
+ } while (ret >= 0);
+ }
+
+ /* Update the read pointer in the log buffer err-cap state */
+ xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap,
+ log_buf_state_offset + offsetof(struct guc_log_buffer_state, read_ptr), u32,
+ write_offset);
+
+ /*
+ * Clear the flush_to_file flag in the local copy first (the local copy was
+ * loaded by the xe_map_memcpy_from() above), then write out the updated
+ * local copy through xe_map_wr().
+ */
+ log_buf_state_local.flags &= ~GUC_LOG_BUFFER_STATE_FLUSH_TO_FILE;
+ xe_map_wr(guc_to_xe(guc), &guc->log.bo->vmap,
+ log_buf_state_offset + offsetof(struct guc_log_buffer_state, flags), u32,
+ log_buf_state_local.flags);
+ __guc_capture_flushlog_complete(guc);
+}
+
+/*
+ * xe_guc_capture_process - Process GuC register captured data
+ * @guc: The GuC object
+ *
+ * When the GuC captured data is ready, the GuC will send the message
+ * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION to the host, and this function
+ * will be called to process the data that comes with the message.
+ *
+ * Returns: None
+ */
+void xe_guc_capture_process(struct xe_guc *guc)
+{
+ if (guc->capture)
+ __guc_capture_process_output(guc);
+}
+
+static struct __guc_capture_parsed_output *
+guc_capture_alloc_one_node(struct xe_guc *guc)
+{
+ struct drm_device *drm = guc_to_drm(guc);
+ struct __guc_capture_parsed_output *new;
+ int i;
+
+ new = drmm_kzalloc(drm, sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ for (i = 0; i < GUC_STATE_CAPTURE_TYPE_MAX; ++i) {
+ new->reginfo[i].regs = drmm_kzalloc(drm, guc->capture->max_mmio_per_node *
+ sizeof(struct guc_mmio_reg), GFP_KERNEL);
+ if (!new->reginfo[i].regs) {
+ while (i)
+ drmm_kfree(drm, new->reginfo[--i].regs);
+ drmm_kfree(drm, new);
+ return NULL;
+ }
+ }
+ guc_capture_init_node(guc, new);
+
+ return new;
+}
+
+static void
+__guc_capture_create_prealloc_nodes(struct xe_guc *guc)
+{
+ struct __guc_capture_parsed_output *node = NULL;
+ int i;
+
+ for (i = 0; i < PREALLOC_NODES_MAX_COUNT; ++i) {
+ node = guc_capture_alloc_one_node(guc);
+ if (!node) {
+ xe_gt_warn(guc_to_gt(guc), "Register capture pre-alloc-cache failure\n");
+ /* don't free the prior allocations; use what we got and clean up at shutdown */
+ return;
+ }
+ guc_capture_add_node_to_cachelist(guc->capture, node);
+ }
+}
+
+static int
+guc_get_max_reglist_count(struct xe_guc *guc)
+{
+ int i, j, k, tmp, maxregcount = 0;
+
+ for (i = 0; i < GUC_CAPTURE_LIST_INDEX_MAX; ++i) {
+ for (j = 0; j < GUC_STATE_CAPTURE_TYPE_MAX; ++j) {
+ for (k = 0; k < GUC_CAPTURE_LIST_CLASS_MAX; ++k) {
+ const struct __guc_mmio_reg_descr_group *match;
+
+ if (j == GUC_STATE_CAPTURE_TYPE_GLOBAL && k > 0)
+ continue;
+
+ tmp = 0;
+ match = guc_capture_get_one_list(guc->capture->reglists, i, j, k);
+ if (match)
+ tmp = match->num_regs;
+
+ match = guc_capture_get_one_list(guc->capture->extlists, i, j, k);
+ if (match)
+ tmp += match->num_regs;
+
+ if (tmp > maxregcount)
+ maxregcount = tmp;
+ }
+ }
+ }
+ if (!maxregcount)
+ maxregcount = PREALLOC_NODES_DEFAULT_NUMREGS;
+
+ return maxregcount;
+}
+
+static void
+guc_capture_create_prealloc_nodes(struct xe_guc *guc)
+{
+ /* skip if we've already done the pre-alloc */
+ if (guc->capture->max_mmio_per_node)
+ return;
+
+ guc->capture->max_mmio_per_node = guc_get_max_reglist_count(guc);
+ __guc_capture_create_prealloc_nodes(guc);
+}
+
+static void
+read_reg_to_node(struct xe_hw_engine *hwe, const struct __guc_mmio_reg_descr_group *list,
+ struct guc_mmio_reg *regs)
+{
+ int i;
+
+ if (!list || !list->list || list->num_regs == 0)
+ return;
+
+ if (!regs)
+ return;
+
+ for (i = 0; i < list->num_regs; i++) {
+ struct __guc_mmio_reg_descr desc = list->list[i];
+ u32 value;
+
+ if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE) {
+ value = xe_hw_engine_mmio_read32(hwe, desc.reg);
+ } else {
+ if (list->type == GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS &&
+ FIELD_GET(GUC_REGSET_STEERING_NEEDED, desc.flags)) {
+ int group, instance;
+
+ group = FIELD_GET(GUC_REGSET_STEERING_GROUP, desc.flags);
+ instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, desc.flags);
+ value = xe_gt_mcr_unicast_read(hwe->gt, XE_REG_MCR(desc.reg.addr),
+ group, instance);
+ } else {
+ value = xe_mmio_read32(&hwe->gt->mmio, desc.reg);
+ }
+ }
+
+ regs[i].value = value;
+ regs[i].offset = desc.reg.addr;
+ regs[i].flags = desc.flags;
+ regs[i].mask = desc.mask;
+ }
+}
+
+/**
+ * xe_engine_manual_capture - Take a manual engine snapshot.
+ * @hwe: Xe HW Engine.
+ * @snapshot: The engine snapshot
+ *
+ * Take an engine snapshot by reading the engine registers directly.
+ *
+ * Returns: None
+ */
+void
+xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_guc *guc = &gt->uc.guc;
+ struct xe_devcoredump *devcoredump = &xe->devcoredump;
+ enum guc_capture_list_class_type capture_class;
+ const struct __guc_mmio_reg_descr_group *list;
+ struct __guc_capture_parsed_output *new;
+ enum guc_state_capture_type type;
+ u16 guc_id = 0;
+ u32 lrca = 0;
+
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ new = guc_capture_get_prealloc_node(guc);
+ if (!new)
+ return;
+
+ capture_class = xe_engine_class_to_guc_capture_class(hwe->class);
+ for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ struct gcap_reg_list_info *reginfo = &new->reginfo[type];
+ /*
+ * reginfo->regs is allocated based on guc->capture->max_mmio_per_node,
+ * which is derived from the descriptor lists driving the population,
+ * so it should not overflow.
+ */
+
+ /* Get register list for the type/class */
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
+ capture_class, false);
+ if (!list) {
+ xe_gt_dbg(gt, "Empty GuC capture register descriptor for %s",
+ hwe->name);
+ continue;
+ }
+
+ read_reg_to_node(hwe, list, reginfo->regs);
+ reginfo->num_regs = list->num_regs;
+
+ /* Capture steering registers for rcs/ccs */
+ if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF,
+ type, capture_class, true);
+ if (list) {
+ read_reg_to_node(hwe, list, &reginfo->regs[reginfo->num_regs]);
+ reginfo->num_regs += list->num_regs;
+ }
+ }
+ }
+
+ if (devcoredump && devcoredump->captured) {
+ struct xe_guc_submit_exec_queue_snapshot *ge = devcoredump->snapshot.ge;
+
+ if (ge) {
+ guc_id = ge->guc.id;
+ if (ge->lrc[0])
+ lrca = ge->lrc[0]->context_desc;
+ }
+ }
+
+ new->eng_class = xe_engine_class_to_guc_class(hwe->class);
+ new->eng_inst = hwe->instance;
+ new->guc_id = guc_id;
+ new->lrca = lrca;
+ new->is_partial = 0;
+ new->locked = 1;
+ new->source = XE_ENGINE_CAPTURE_SOURCE_MANUAL;
+
+ guc_capture_add_node_to_outlist(guc->capture, new);
+ devcoredump->snapshot.matched_node = new;
+}
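+
+/*
+ * Usage note (a sketch of the expected flow, not enforced here): this path
+ * is typically reached from the devcoredump engine-snapshot code when no
+ * GuC-reported capture node is available, so the registers are read
+ * directly via MMIO and the resulting node is published on the outlist as
+ * XE_ENGINE_CAPTURE_SOURCE_MANUAL.
+ */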
+
+static struct guc_mmio_reg *
+guc_capture_find_reg(struct gcap_reg_list_info *reginfo, u32 addr, u32 flags)
+{
+ int i;
+
+ if (reginfo && reginfo->num_regs > 0) {
+ struct guc_mmio_reg *regs = reginfo->regs;
+
+ if (regs)
+ for (i = 0; i < reginfo->num_regs; i++)
+ if (regs[i].offset == addr && regs[i].flags == flags)
+ return &regs[i];
+ }
+
+ return NULL;
+}
+
+static void
+snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p,
+ u32 type, const struct __guc_mmio_reg_descr_group *list)
+{
+ struct xe_gt *gt = snapshot->hwe->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_devcoredump *devcoredump = &xe->devcoredump;
+ struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
+ struct gcap_reg_list_info *reginfo = NULL;
+ u32 i, last_value = 0;
+ bool low32_ready = false;
+
+ if (!list || !list->list || list->num_regs == 0)
+ return;
+ XE_WARN_ON(!devcore_snapshot->matched_node);
+
+ reginfo = &devcore_snapshot->matched_node->reginfo[type];
+
+ /*
+ * Loop through the descriptor list first and look each register up in
+ * the node. This is more maintainable for developers, as it ensures
+ * the printout matches the ordering of the static descriptor
+ * table-of-lists.
+ */
+ for (i = 0; i < list->num_regs; i++) {
+ const struct __guc_mmio_reg_descr *reg_desc = &list->list[i];
+ struct guc_mmio_reg *reg;
+ u32 value;
+
+ reg = guc_capture_find_reg(reginfo, reg_desc->reg.addr, reg_desc->flags);
+ if (!reg)
+ continue;
+
+ value = reg->value;
+ switch (reg_desc->data_type) {
+ case REG_64BIT_LOW_DW:
+ last_value = value;
+
+ /*
+ * A 64-bit register requires two consecutive
+ * entries in the register list, with the low dword
+ * first and the high dword second, like:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"},
+ *
+ * An incorrect order will trigger XE_WARN.
+ *
+ * A double low dword is possible here, for example:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ */
+ XE_WARN_ON(low32_ready);
+ low32_ready = true;
+ /* Low 32 bit dword saved, continue for high 32 bit */
+ break;
+
+ case REG_64BIT_HI_DW: {
+ u64 value_qw = ((u64)value << 32) | last_value;
+
+ /*
+ * Incorrect 64-bit register order; possibly a missing
+ * low dword, for example:
+ * { XXX_REG(0), REG_32BIT, 0, 0, NULL},
+ * { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, NULL},
+ */
+ XE_WARN_ON(!low32_ready);
+ low32_ready = false;
+
+ drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw);
+ break;
+ }
+
+ case REG_32BIT:
+ /*
+ * Incorrect 64-bit register order; possibly a missing
+ * high dword, for example:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * { XXX_REG(0), REG_32BIT, 0, 0, "XXX_REG"},
+ */
+ XE_WARN_ON(low32_ready);
+
+ if (FIELD_GET(GUC_REGSET_STEERING_NEEDED, reg_desc->flags))
+ drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname,
+ reg_desc->dss_id, value);
+ else
+ drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
+
+ break;
+ }
+ }
+
+ /*
+ * Incorrect 64-bit register order; possibly a missing
+ * high dword, for example:
+ * { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
+ * } // <- Register list end
+ */
+ XE_WARN_ON(low32_ready);
+}
+
+/**
+ * xe_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
+ * @snapshot: Xe HW Engine snapshot object.
+ * @p: drm_printer where it will be printed out.
+ *
+ * This function prints out a given Xe HW Engine snapshot object.
+ */
+void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p)
+{
+ const char *grptype[GUC_STATE_CAPTURE_GROUP_TYPE_MAX] = {
+ "full-capture",
+ "partial-capture"
+ };
+ int type;
+ const struct __guc_mmio_reg_descr_group *list;
+ enum guc_capture_list_class_type capture_class;
+
+ struct xe_gt *gt;
+ struct xe_device *xe;
+ struct xe_devcoredump *devcoredump;
+ struct xe_devcoredump_snapshot *devcore_snapshot;
+
+ if (!snapshot)
+ return;
+
+ gt = snapshot->hwe->gt;
+ xe = gt_to_xe(gt);
+ devcoredump = &xe->devcoredump;
+ devcore_snapshot = &devcoredump->snapshot;
+
+ if (!devcore_snapshot->matched_node)
+ return;
+
+ xe_gt_assert(gt, snapshot->hwe);
+
+ capture_class = xe_engine_class_to_guc_capture_class(snapshot->hwe->class);
+
+ drm_printf(p, "%s (physical), logical instance=%d\n",
+ snapshot->name ? snapshot->name : "",
+ snapshot->logical_instance);
+ drm_printf(p, "\tCapture_source: %s\n",
+ devcore_snapshot->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC ?
+ "GuC" : "Manual");
+ drm_printf(p, "\tCoverage: %s\n", grptype[devcore_snapshot->matched_node->is_partial]);
+ drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
+ snapshot->forcewake.domain, snapshot->forcewake.ref);
+ drm_printf(p, "\tReserved: %s\n",
+ str_yes_no(snapshot->kernel_reserved));
+
+ for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) {
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type,
+ capture_class, false);
+ snapshot_print_by_list_order(snapshot, p, type, list);
+ }
+
+ if (capture_class == GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE) {
+ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF,
+ GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ capture_class, true);
+ snapshot_print_by_list_order(snapshot, p, GUC_STATE_CAPTURE_TYPE_ENGINE_CLASS,
+ list);
+ }
+
+ drm_puts(p, "\n");
+}
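+
+/*
+ * Example output produced by the above (register names and values are
+ * purely illustrative):
+ *
+ *   rcs0 (physical), logical instance=0
+ *     Capture_source: GuC
+ *     Coverage: full-capture
+ *     Forcewake: domain 0x7, ref 1
+ *     Reserved: no
+ *     RING_HEAD: 0x00000128
+ *     RING_DMA_FADD: 0x0000000000001000
+ */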
+
+/**
+ * xe_guc_capture_get_matching_and_lock - Get the matching GuC capture node for the queue.
+ * @q: The exec queue object
+ *
+ * Search the capture outlist for a node matching the queue; this can also
+ * be used to check whether a GuC capture is ready for the queue.
+ * If found, the node's locked flag is set.
+ *
+ * Returns: the matching guc-capture node pointer, or NULL if none is found
+ */
+struct __guc_capture_parsed_output *
+xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_device *xe;
+ u16 guc_class = GUC_LAST_ENGINE_CLASS + 1;
+ struct xe_devcoredump_snapshot *ss;
+
+ if (!q || !q->gt)
+ return NULL;
+
+ xe = gt_to_xe(q->gt);
+ if (xe->wedged.mode >= 2 || !xe_device_uc_enabled(xe) || IS_SRIOV_VF(xe))
+ return NULL;
+
+ ss = &xe->devcoredump.snapshot;
+ if (ss->matched_node && ss->matched_node->source == XE_ENGINE_CAPTURE_SOURCE_GUC)
+ return ss->matched_node;
+
+ /* Find hwe for the queue */
+ for_each_hw_engine(hwe, q->gt, id) {
+ if (hwe != q->hwe)
+ continue;
+ guc_class = xe_engine_class_to_guc_class(hwe->class);
+ break;
+ }
+
+ if (guc_class <= GUC_LAST_ENGINE_CLASS) {
+ struct __guc_capture_parsed_output *n, *ntmp;
+ struct xe_guc *guc = &q->gt->uc.guc;
+ u16 guc_id = q->guc->id;
+ u32 lrca = xe_lrc_ggtt_addr(q->lrc[0]);
+
+ /*
+ * Look for a matching GuC reported error capture node from
+ * the internal output link-list based on engine, guc id and
+ * lrca info.
+ */
+ list_for_each_entry_safe(n, ntmp, &guc->capture->outlist, link) {
+ if (n->eng_class == guc_class && n->eng_inst == hwe->instance &&
+ n->guc_id == guc_id && n->lrca == lrca &&
+ n->source == XE_ENGINE_CAPTURE_SOURCE_GUC) {
+ n->locked = 1;
+ return n;
+ }
+ }
+ }
+ return NULL;
+}
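+
+/*
+ * See xe_engine_snapshot_capture_for_queue() below for a typical caller:
+ * the node returned here replaces a previously taken manual snapshot once
+ * a matching GuC-reported capture shows up on the outlist.
+ */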
+
+/**
+ * xe_engine_snapshot_capture_for_queue - Take a snapshot of the associated engine
+ * @q: The exec queue object
+ *
+ * Take a snapshot of the HW engine associated with the exec queue.
+ *
+ * Returns: None.
+ */
+void
+xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q)
+{
+ struct xe_device *xe = gt_to_xe(q->gt);
+ struct xe_devcoredump *coredump = &xe->devcoredump;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ u32 adj_logical_mask = q->logical_mask;
+
+ if (IS_SRIOV_VF(xe))
+ return;
+
+ for_each_hw_engine(hwe, q->gt, id) {
+ if (hwe->class != q->hwe->class ||
+ !(BIT(hwe->logical_instance) & adj_logical_mask)) {
+ coredump->snapshot.hwe[id] = NULL;
+ continue;
+ }
+
+ if (!coredump->snapshot.hwe[id]) {
+ coredump->snapshot.hwe[id] =
+ xe_hw_engine_snapshot_capture(hwe, q);
+ } else {
+ struct __guc_capture_parsed_output *new;
+
+ new = xe_guc_capture_get_matching_and_lock(q);
+ if (new) {
+ struct xe_guc *guc = &q->gt->uc.guc;
+
+ /*
+ * If we are here, it means we found a fresh
+ * GuC-err-capture node for this engine after
+ * previously failing to find a match in the
+ * early part of guc_exec_queue_timedout_job.
+ * Thus we must free the manually captured node.
+ */
+ guc_capture_free_outlist_node(guc->capture,
+ coredump->snapshot.matched_node);
+ coredump->snapshot.matched_node = new;
+ }
+ }
+
+ break;
+ }
+}
+
+/**
+ * xe_guc_capture_put_matched_nodes - Clean up matched nodes
+ * @guc: The GuC object
+ *
+ * Free the matched node and all nodes with the same guc_id from
+ * the GuC capture outlist.
+ */
+void xe_guc_capture_put_matched_nodes(struct xe_guc *guc)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_devcoredump *devcoredump = &xe->devcoredump;
+ struct __guc_capture_parsed_output *n = devcoredump->snapshot.matched_node;
+
+ if (n) {
+ guc_capture_remove_stale_matches_from_list(guc->capture, n);
+ guc_capture_free_outlist_node(guc->capture, n);
+ devcoredump->snapshot.matched_node = NULL;
+ }
+}
+
+/**
+ * xe_guc_capture_steered_list_init - Init the steered register lists
+ * @guc: The GuC object
+ *
+ * Init the steered register lists and create the pre-allocated capture nodes.
+ */
+void xe_guc_capture_steered_list_init(struct xe_guc *guc)
+{
+ /*
+ * For certain engine classes, there are slice and subslice
+ * level registers requiring steering. We allocate and populate
+ * these based on hw config and add it as an extension list at
+ * the end of the pre-populated render list.
+ */
+ guc_capture_alloc_steered_lists(guc);
+ check_guc_capture_size(guc);
+ guc_capture_create_prealloc_nodes(guc);
+}
+
+/**
+ * xe_guc_capture_init - Init for GuC register capture
+ * @guc: The GuC object
+ *
+ * Init for GuC register capture: allocate memory for the capture data structure.
+ *
+ * Returns: 0 on success,
+ * -ENOMEM if out of memory.
+ */
+int xe_guc_capture_init(struct xe_guc *guc)
+{
+ guc->capture = drmm_kzalloc(guc_to_drm(guc), sizeof(*guc->capture), GFP_KERNEL);
+ if (!guc->capture)
+ return -ENOMEM;
+
+ guc->capture->reglists = guc_capture_get_device_reglist(guc_to_xe(guc));
+
+ INIT_LIST_HEAD(&guc->capture->outlist);
+ INIT_LIST_HEAD(&guc->capture->cachelist);
+
+ return 0;
+}
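+
+/*
+ * Init-ordering sketch (assumed, not enforced by this file alone):
+ * xe_guc_capture_init() runs early to allocate the bookkeeping structure
+ * and pick the static reglists, while xe_guc_capture_steered_list_init()
+ * runs later, once the hw config information needed to build the steered
+ * extension lists and size the pre-allocated nodes is available.
+ */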
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.h b/drivers/gpu/drm/xe/xe_guc_capture.h
new file mode 100644
index 000000000000..20a078dc4b85
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_capture.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_CAPTURE_H
+#define _XE_GUC_CAPTURE_H
+
+#include <linux/types.h>
+#include "abi/guc_capture_abi.h"
+#include "xe_guc.h"
+#include "xe_guc_fwif.h"
+
+struct xe_exec_queue;
+struct xe_guc;
+struct xe_hw_engine;
+struct xe_hw_engine_snapshot;
+
+static inline enum guc_capture_list_class_type xe_guc_class_to_capture_class(u16 class)
+{
+ switch (class) {
+ case GUC_RENDER_CLASS:
+ case GUC_COMPUTE_CLASS:
+ return GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE;
+ case GUC_GSC_OTHER_CLASS:
+ return GUC_CAPTURE_LIST_CLASS_GSC_OTHER;
+ case GUC_VIDEO_CLASS:
+ case GUC_VIDEOENHANCE_CLASS:
+ case GUC_BLITTER_CLASS:
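+ /*
+ * Assumed 1:1 mapping: the video, video-enhance and blitter values
+ * are expected to be numerically identical in both enums, so the
+ * class can be returned unchanged.
+ */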
+ return class;
+ default:
+ XE_WARN_ON(class);
+ return GUC_CAPTURE_LIST_CLASS_MAX;
+ }
+}
+
+static inline enum guc_capture_list_class_type
+xe_engine_class_to_guc_capture_class(enum xe_engine_class class)
+{
+ return xe_guc_class_to_capture_class(xe_engine_class_to_guc_class(class));
+}
+
+void xe_guc_capture_process(struct xe_guc *guc);
+int xe_guc_capture_getlist(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, void **outptr);
+int xe_guc_capture_getlistsize(struct xe_guc *guc, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, size_t *size);
+int xe_guc_capture_getnullheader(struct xe_guc *guc, void **outptr, size_t *size);
+size_t xe_guc_capture_ads_input_worst_size(struct xe_guc *guc);
+const struct __guc_mmio_reg_descr_group *
+xe_guc_capture_get_reg_desc_list(struct xe_gt *gt, u32 owner, u32 type,
+ enum guc_capture_list_class_type capture_class, bool is_ext);
+struct __guc_capture_parsed_output *xe_guc_capture_get_matching_and_lock(struct xe_exec_queue *q);
+void xe_engine_manual_capture(struct xe_hw_engine *hwe, struct xe_hw_engine_snapshot *snapshot);
+void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p);
+void xe_engine_snapshot_capture_for_queue(struct xe_exec_queue *q);
+void xe_guc_capture_steered_list_init(struct xe_guc *guc);
+void xe_guc_capture_put_matched_nodes(struct xe_guc *guc);
+int xe_guc_capture_init(struct xe_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_capture_types.h b/drivers/gpu/drm/xe/xe_guc_capture_types.h
new file mode 100644
index 000000000000..6cb439115597
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_capture_types.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021-2024 Intel Corporation
+ */
+
+#ifndef _XE_GUC_CAPTURE_TYPES_H
+#define _XE_GUC_CAPTURE_TYPES_H
+
+#include <linux/types.h>
+#include "regs/xe_reg_defs.h"
+
+struct xe_guc;
+
+/* data type of the register in register list */
+enum capture_register_data_type {
+ REG_32BIT = 0,
+ REG_64BIT_LOW_DW,
+ REG_64BIT_HI_DW,
+};
+
+/**
+ * struct __guc_mmio_reg_descr - GuC mmio register descriptor
+ *
+ * The xe_guc_capture module uses these structures to define a register
+ * (offset, name, flags, ...) that is used at ADS registration time as
+ * well as during runtime processing and reporting of error-capture
+ * states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr {
+ /** @reg: the register */
+ struct xe_reg reg;
+ /**
+ * @data_type: data type of the register
+ * Can be a 32-bit value, or the low or high dword of a 64-bit value;
+ * see enum capture_register_data_type
+ */
+ enum capture_register_data_type data_type;
+ /** @flags: Flags for the register */
+ u32 flags;
+ /** @mask: The mask to apply */
+ u32 mask;
+ /** @dss_id: Cached index for steered registers */
+ u32 dss_id;
+ /** @regname: Name of the register */
+ const char *regname;
+};
+
+/**
+ * struct __guc_mmio_reg_descr_group - A group of register descriptors
+ *
+ * The xe_guc_capture module uses these structures to maintain static
+ * tables (per unique platform) that consist of lists of registers
+ * (offsets, names, flags, ...) that are used at ADS registration
+ * time as well as during runtime processing and reporting of error-
+ * capture states generated by GuC just prior to engine reset events.
+ */
+struct __guc_mmio_reg_descr_group {
+ /** @list: The register list */
+ const struct __guc_mmio_reg_descr *list;
+ /** @num_regs: Count of registers in the list */
+ u32 num_regs;
+ /** @owner: PF/VF owner, see enum guc_capture_list_index_type */
+ u32 owner;
+ /** @type: Capture register type, see enum guc_state_capture_type */
+ u32 type;
+ /** @engine: The engine class, see enum guc_capture_list_class_type */
+ u32 engine;
+};
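+
+/*
+ * Illustrative use of the above types (hypothetical register and list
+ * names; the real tables live in xe_guc_capture.c):
+ *
+ *	static const struct __guc_mmio_reg_descr hypothetical_regs[] = {
+ *		{ .reg = RING_HEAD(0), .data_type = REG_32BIT,
+ *		  .regname = "RING_HEAD" },
+ *	};
+ *
+ *	static const struct __guc_mmio_reg_descr_group hypothetical_lists[] = {
+ *		{ .list = hypothetical_regs,
+ *		  .num_regs = ARRAY_SIZE(hypothetical_regs),
+ *		  .owner = GUC_CAPTURE_LIST_INDEX_PF,
+ *		  .type = GUC_STATE_CAPTURE_TYPE_ENGINE_INSTANCE,
+ *		  .engine = GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE },
+ *	};
+ */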
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 17986bfd8818..bbcbb348256f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>
+#include <linux/fault-inject.h>
#include <kunit/static_stub.h>
@@ -17,6 +18,7 @@
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
+#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
@@ -25,12 +27,54 @@
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
+#include "xe_guc_log.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"
+static void receive_g2h(struct xe_guc_ct *ct);
+static void g2h_worker_func(struct work_struct *w);
+static void safe_mode_worker_func(struct work_struct *w);
+static void ct_exit_safe_mode(struct xe_guc_ct *ct);
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+enum {
+ /* Internal states, not error conditions */
+ CT_DEAD_STATE_REARM, /* 0x0001 */
+ CT_DEAD_STATE_CAPTURE, /* 0x0002 */
+
+ /* Error conditions */
+ CT_DEAD_SETUP, /* 0x0004 */
+ CT_DEAD_H2G_WRITE, /* 0x0008 */
+ CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */
+ CT_DEAD_G2H_READ, /* 0x0020 */
+ CT_DEAD_G2H_RECV, /* 0x0040 */
+ CT_DEAD_G2H_RELEASE, /* 0x0080 */
+ CT_DEAD_DEADLOCK, /* 0x0100 */
+ CT_DEAD_PROCESS_FAILED, /* 0x0200 */
+ CT_DEAD_FAST_G2H, /* 0x0400 */
+ CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */
+ CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */
+ CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */
+ CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */
+ CT_DEAD_CRASH, /* 0x8000 */
+};
+
+static void ct_dead_worker_func(struct work_struct *w);
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code);
+
+#define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code)
+#else
+#define CT_DEAD(ct, ctb, reason) \
+ do { \
+ struct guc_ctb *_ctb = (ctb); \
+ if (_ctb) \
+ _ctb->info.broken = true; \
+ } while (0)
+#endif
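+
+/*
+ * CT_DEAD() usage sketch: callers pass the failing CTB (or NULL) plus a
+ * reason suffix, e.g. CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE). On debug
+ * builds this records the reason bit and schedules a one-shot dump via
+ * ct_dead_capture(); otherwise it just marks the CTB as broken.
+ */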
+
/* Used when a CT send wants to block and / or receive data */
struct g2h_fence {
u32 *response_buffer;
@@ -147,14 +191,11 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_ct *ct = arg;
+ ct_exit_safe_mode(ct);
destroy_workqueue(ct->g2h_wq);
xa_destroy(&ct->fence_lookup);
}
-static void receive_g2h(struct xe_guc_ct *ct);
-static void g2h_worker_func(struct work_struct *w);
-static void safe_mode_worker_func(struct work_struct *w);
-
static void primelockdep(struct xe_guc_ct *ct)
{
if (!IS_ENABLED(CONFIG_LOCKDEP))
@@ -175,14 +216,18 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
- ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
+ ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
if (!ct->g2h_wq)
return -ENOMEM;
spin_lock_init(&ct->fast_lock);
xa_init(&ct->fence_lookup);
INIT_WORK(&ct->g2h_worker, g2h_worker_func);
- INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+ INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ spin_lock_init(&ct->dead.lock);
+ INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
+#endif
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
@@ -195,7 +240,8 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -209,6 +255,7 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
ct->state = XE_GUC_CT_STATE_DISABLED;
return 0;
}
+ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
#define desc_read(xe_, guc_ctb__, field_) \
xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
@@ -395,6 +442,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size);
guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
@@ -419,10 +467,24 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
if (ct_needs_safe_mode(ct))
ct_enter_safe_mode(ct);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ /*
+ * The CT has now been reset so the dumper can be re-armed
+ * after any existing dead state has been dumped.
+ */
+ spin_lock_irq(&ct->dead.lock);
+ if (ct->dead.reason) {
+ ct->dead.reason |= (1 << CT_DEAD_STATE_REARM);
+ queue_work(system_unbound_wq, &ct->dead.worker);
+ }
+ spin_unlock_irq(&ct->dead.lock);
+#endif
+
return 0;
err_out:
xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err));
+ CT_DEAD(ct, NULL, SETUP);
return err;
}
@@ -454,6 +516,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
*/
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
+ if (!xe_guc_ct_initialized(ct))
+ return;
+
xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -466,6 +531,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
if (cmd_len > h2g->info.space) {
h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+
+ if (h2g->info.head > h2g->info.size) {
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 desc_status = desc_read(xe, h2g, status);
+
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+
+ xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u\n",
+ h2g->info.head, h2g->info.size);
+ CT_DEAD(ct, h2g, H2G_HAS_ROOM);
+ return false;
+ }
+
h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head,
h2g->info.size) -
h2g->info.resv_space;
@@ -521,10 +599,24 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{
+ bool bad = false;
+
lockdep_assert_held(&ct->fast_lock);
- xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <=
- ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
- xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding);
+
+ bad = ct->ctbs.g2h.info.space + g2h_len >
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space;
+ bad |= !ct->g2h_outstanding;
+
+ if (bad) {
+ xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n",
+ ct->ctbs.g2h.info.space, g2h_len,
+ ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space,
+ ct->ctbs.g2h.info.space + g2h_len,
+ ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space,
+ ct->g2h_outstanding);
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE);
+ return;
+ }
ct->ctbs.g2h.info.space += g2h_len;
if (!--ct->g2h_outstanding)
@@ -551,12 +643,43 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 full_len;
struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds,
tail * sizeof(u32));
+ u32 desc_status;
full_len = len + GUC_CTB_HDR_LEN;
lockdep_assert_held(&ct->lock);
xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN);
- xe_gt_assert(gt, tail <= h2g->info.size);
+
+ desc_status = desc_read(xe, h2g, status);
+ if (desc_status) {
+ xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, h2g, tail);
+ u32 desc_head = desc_read(xe, h2g, head);
+
+ if (tail != desc_tail) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail);
+ goto corrupted;
+ }
+
+ if (tail > h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n",
+ tail, h2g->info.size);
+ goto corrupted;
+ }
+
+ if (desc_head >= h2g->info.size) {
+ desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT write: invalid head offset %u >= %u\n",
+ desc_head, h2g->info.size);
+ goto corrupted;
+ }
+ }
/* Command will wrap, zero fill (NOPs), return and check credits again */
if (tail + full_len > h2g->info.size) {
@@ -593,7 +716,7 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
--len;
++action;
- /* Write H2G ensuring visable before descriptor update */
+ /* Write H2G ensuring visible before descriptor update */
xe_map_memcpy_to(xe, &map, 0, cmd, H2G_CT_HEADERS * sizeof(u32));
xe_map_memcpy_to(xe, &map, H2G_CT_HEADERS * sizeof(u32), action, len * sizeof(u32));
xe_device_wmb(xe);
@@ -609,6 +732,10 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
desc_read(xe, h2g, head), h2g->info.tail);
return 0;
+
+corrupted:
+ CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE);
+ return -EPIPE;
}
/*
@@ -638,7 +765,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u16 seqno;
int ret;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
xe_gt_assert(gt, !g2h_len || !g2h_fence);
xe_gt_assert(gt, !num_g2h || !g2h_fence);
xe_gt_assert(gt, !g2h_len || num_g2h);
@@ -716,7 +843,6 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
- struct drm_printer p = xe_gt_info_printer(gt);
unsigned int sleep_period_ms = 1;
int ret;
@@ -769,8 +895,13 @@ try_again:
goto broken;
#undef g2h_avail
- if (dequeue_one_g2h(ct) < 0)
+ ret = dequeue_one_g2h(ct);
+ if (ret < 0) {
+ if (ret != -ECANCELED)
+ xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
+ ERR_PTR(ret));
goto broken;
+ }
goto try_again;
}
@@ -779,8 +910,7 @@ try_again:
broken:
xe_gt_err(gt, "No forward process on H2G, reset required\n");
- xe_guc_ct_print(ct, &p, true);
- ct->ctbs.h2g.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK);
return -EDEADLK;
}
@@ -848,7 +978,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
#define ct_alive(ct) \
(xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
!ct->ctbs.g2h.info.broken)
- if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
+ if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
return false;
#undef ct_alive
@@ -890,24 +1020,14 @@ retry_same_fence:
goto retry_same_fence;
if (!g2h_fence_needs_alloc(&g2h_fence))
- xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
return ret;
}
ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
-
- /*
- * Occasionally it is seen that the G2H worker starts running after a delay of more than
- * a second even after being queued and activated by the Linux workqueue subsystem. This
- * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
- * Lunarlake Hybrid CPU. Issue dissappears if we disable Lunarlake atom cores from BIOS
- * and this is beyond xe kmd.
- *
- * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
- */
if (!ret) {
- flush_work(&ct->g2h_worker);
+ LNL_FLUSH_WORK(&ct->g2h_worker);
if (g2h_fence.done) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
@@ -925,7 +1045,7 @@ retry_same_fence:
if (!ret) {
xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
- xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+ xa_erase(&ct->fence_lookup, g2h_fence.seqno);
mutex_unlock(&ct->lock);
return -ETIME;
}
@@ -974,6 +1094,7 @@ int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
return guc_ct_send_recv(ct, action, len, response_buffer, false);
}
+ALLOW_ERROR_INJECTION(xe_guc_ct_send_recv, ERRNO);
int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 *response_buffer)
@@ -1009,6 +1130,24 @@ static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
return 0;
}
+static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+
+ if (action == XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED)
+ xe_gt_err(gt, "GuC Crash dump notification\n");
+ else if (action == XE_GUC_ACTION_NOTIFY_EXCEPTION)
+ xe_gt_err(gt, "GuC Exception notification\n");
+ else
+ xe_gt_err(gt, "Unknown GuC crash notification: 0x%04X\n", action);
+
+ CT_DEAD(ct, NULL, CRASH);
+
+ kick_reset(ct);
+
+ return 0;
+}
+
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1037,6 +1176,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
else
xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
type, fence);
+ CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
}
@@ -1044,6 +1184,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
g2h_fence = xa_erase(&ct->fence_lookup, fence);
if (unlikely(!g2h_fence)) {
/* Don't tear down channel, as send could've timed out */
+ /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */
xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
return 0;
@@ -1088,7 +1229,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n",
origin);
- ct->ctbs.g2h.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN);
return -EPROTO;
}
@@ -1106,7 +1247,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
default:
xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n",
type);
- ct->ctbs.g2h.info.broken = true;
+ CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE);
ret = -EOPNOTSUPP;
}
@@ -1149,6 +1290,8 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
/* Selftest only at the moment */
break;
case XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION:
+ ret = xe_guc_error_capture_handler(guc, payload, adj_len);
+ break;
case XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE:
/* FIXME: Handle this */
break;
@@ -1179,13 +1322,19 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
case GUC_ACTION_GUC2PF_ADVERSE_EVENT:
ret = xe_gt_sriov_pf_monitor_process_guc2pf(gt, hxg, hxg_len);
break;
+ case XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED:
+ case XE_GUC_ACTION_NOTIFY_EXCEPTION:
+ ret = guc_crash_process_msg(ct, action);
+ break;
default:
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
- if (ret)
- xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
- action, ERR_PTR(ret));
+ if (ret) {
+ xe_gt_err(gt, "G2H action %#04x failed (%pe) len %u msg %*ph\n",
+ action, ERR_PTR(ret), hxg_len, (int)sizeof(u32) * hxg_len, hxg);
+ CT_DEAD(ct, NULL, PROCESS_FAILED);
+ }
return 0;
}
@@ -1195,12 +1344,12 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
- u32 tail, head, len;
+ u32 tail, head, len, desc_status;
s32 avail;
u32 action;
u32 *hxg;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)
@@ -1214,6 +1363,63 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
xe_gt_assert(gt, xe_guc_ct_enabled(ct));
+ desc_status = desc_read(xe, g2h, status);
+ if (desc_status) {
+ if (desc_status & GUC_CTB_STATUS_DISABLED) {
+ /*
+ * Potentially valid if a CLIENT_RESET request resulted in
+ * contexts/engines being reset. But should never happen as
+ * no contexts should be active when CLIENT_RESET is sent.
+ */
+ xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n");
+ desc_status &= ~GUC_CTB_STATUS_DISABLED;
+ }
+
+ if (desc_status) {
+ xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status);
+ goto corrupted;
+ }
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
+ u32 desc_tail = desc_read(xe, g2h, tail);
+ /*
+ u32 desc_head = desc_read(xe, g2h, head);
+
+ * info.head and desc_head are updated back-to-back at the end of
+ * this function and nowhere else. Hence, they cannot be different
+ * unless two g2h_read calls are running concurrently. Which is not
+ * possible because it is guarded by ct->fast_lock. And yet, some
+ * discrete platforms are regularly hitting this error :(.
+ *
+ * desc_head rolling backwards shouldn't cause any noticeable
+ * problems - just a delay in GuC being allowed to proceed past that
+ * point in the queue. So for now, just disable the error until it
+ * can be root caused.
+ *
+ if (g2h->info.head != desc_head) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH);
+ xe_gt_err(gt, "CT read: head was modified %u != %u\n",
+ desc_head, g2h->info.head);
+ goto corrupted;
+ }
+ */
+
+ if (g2h->info.head > g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: head out of range: %u vs %u\n",
+ g2h->info.head, g2h->info.size);
+ goto corrupted;
+ }
+
+ if (desc_tail >= g2h->info.size) {
+ desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW);
+ xe_gt_err(gt, "CT read: invalid tail offset %u >= %u\n",
+ desc_tail, g2h->info.size);
+ goto corrupted;
+ }
+ }
+
/* Calculate DW available to read */
tail = desc_read(xe, g2h, tail);
avail = tail - g2h->info.head;
@@ -1230,9 +1436,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
if (len > avail) {
xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n",
avail, len);
- g2h->info.broken = true;
-
- return -EPROTO;
+ goto corrupted;
}
head = (g2h->info.head + 1) % g2h->info.size;
@@ -1278,6 +1482,10 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
action, len, g2h->info.head, tail);
return len;
+
+corrupted:
+ CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ);
+ return -EPROTO;
}
static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
@@ -1304,9 +1512,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
xe_gt_warn(gt, "NOT_POSSIBLE");
}
- if (ret)
+ if (ret) {
xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n",
action, ERR_PTR(ret));
+ CT_DEAD(ct, NULL, FAST_G2H);
+ }
}
/**
@@ -1366,7 +1576,6 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct)
static void receive_g2h(struct xe_guc_ct *ct)
{
- struct xe_gt *gt = ct_to_gt(ct);
bool ongoing;
int ret;
@@ -1403,9 +1612,8 @@ static void receive_g2h(struct xe_guc_ct *ct)
mutex_unlock(&ct->lock);
if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) {
- struct drm_printer p = xe_gt_info_printer(gt);
-
- xe_guc_ct_print(ct, &p, false);
+ xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret);
+ CT_DEAD(ct, NULL, G2H_RECV);
kick_reset(ct);
}
} while (ret == 1);
@@ -1421,49 +1629,34 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
-static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
- struct guc_ctb_snapshot *snapshot,
- bool atomic)
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
{
- u32 head, tail;
-
- xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
- sizeof(struct guc_ct_buffer_desc));
- memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
+ struct xe_guc_ct_snapshot *snapshot;
- snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32),
- atomic ? GFP_ATOMIC : GFP_KERNEL);
+ snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot)
+ return NULL;
- if (!snapshot->cmds) {
- drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n");
- return;
+ if (ct->bo && want_ctb) {
+ snapshot->ctb_size = ct->bo->size;
+ snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ? GFP_ATOMIC : GFP_KERNEL);
}
- head = snapshot->desc.head;
- tail = snapshot->desc.tail;
-
- if (head != tail) {
- struct iosys_map map =
- IOSYS_MAP_INIT_OFFSET(&ctb->cmds, head * sizeof(u32));
-
- while (head != tail) {
- snapshot->cmds[head] = xe_map_rd(xe, &map, 0, u32);
- ++head;
- if (head == ctb->info.size) {
- head = 0;
- map = ctb->cmds;
- } else {
- iosys_map_incr(&map, sizeof(u32));
- }
- }
- }
+ return snapshot;
+}
+
+static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
+ struct guc_ctb_snapshot *snapshot)
+{
+ xe_map_memcpy_from(xe, &snapshot->desc, &ctb->desc, 0,
+ sizeof(struct guc_ct_buffer_desc));
+ memcpy(&snapshot->info, &ctb->info, sizeof(struct guc_ctb_info));
}
static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
struct drm_printer *p)
{
- u32 head, tail;
-
drm_printf(p, "\tsize: %d\n", snapshot->info.size);
drm_printf(p, "\tresv_space: %d\n", snapshot->info.resv_space);
drm_printf(p, "\thead: %d\n", snapshot->info.head);
@@ -1473,63 +1666,46 @@ static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
drm_printf(p, "\thead (memory): %d\n", snapshot->desc.head);
drm_printf(p, "\ttail (memory): %d\n", snapshot->desc.tail);
drm_printf(p, "\tstatus (memory): 0x%x\n", snapshot->desc.status);
+}
- if (!snapshot->cmds)
- return;
+static struct xe_guc_ct_snapshot *guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic,
+ bool want_ctb)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_guc_ct_snapshot *snapshot;
- head = snapshot->desc.head;
- tail = snapshot->desc.tail;
+ snapshot = guc_ct_snapshot_alloc(ct, atomic, want_ctb);
+ if (!snapshot) {
+ xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n");
+ return NULL;
+ }
- while (head != tail) {
- drm_printf(p, "\tcmd[%d]: 0x%08x\n", head,
- snapshot->cmds[head]);
- ++head;
- if (head == snapshot->info.size)
- head = 0;
+ if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
+ snapshot->ct_enabled = true;
+ snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g, &snapshot->h2g);
+ guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h, &snapshot->g2h);
}
-}
-static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
-{
- kfree(snapshot->cmds);
+ if (ct->bo && snapshot->ctb)
+ xe_map_memcpy_from(xe, snapshot->ctb, &ct->bo->vmap, 0, snapshot->ctb_size);
+
+ return snapshot;
}
/**
* xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
* @ct: GuC CT object.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
*
* This can be printed out in a later stage like during dev_coredump
- * analysis.
+ * analysis. This is safe to call from atomic context.
*
* Returns: a GuC CT snapshot object that must be freed by the caller
* by using `xe_guc_ct_snapshot_free`.
*/
-struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
- bool atomic)
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct)
{
- struct xe_device *xe = ct_to_xe(ct);
- struct xe_guc_ct_snapshot *snapshot;
-
- snapshot = kzalloc(sizeof(*snapshot),
- atomic ? GFP_ATOMIC : GFP_KERNEL);
-
- if (!snapshot) {
- drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n");
- return NULL;
- }
-
- if (xe_guc_ct_enabled(ct) || ct->state == XE_GUC_CT_STATE_STOPPED) {
- snapshot->ct_enabled = true;
- snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
- guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
- &snapshot->h2g, atomic);
- guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
- &snapshot->g2h, atomic);
- }
-
- return snapshot;
+ return guc_ct_snapshot_capture(ct, true, true);
}
/**
@@ -1549,11 +1725,16 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_puts(p, "H2G CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->h2g, p);
- drm_puts(p, "\nG2H CTB (all sizes in DW):\n");
+ drm_puts(p, "G2H CTB (all sizes in DW):\n");
guc_ctb_snapshot_print(&snapshot->g2h, p);
-
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
+
+ if (snapshot->ctb) {
+ drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
+ xe_print_blob_ascii85(p, "[CTB].data", '\n',
+ snapshot->ctb, 0, snapshot->ctb_size);
+ }
} else {
drm_puts(p, "CT disabled\n");
}
@@ -1571,8 +1752,7 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
if (!snapshot)
return;
- guc_ctb_snapshot_free(&snapshot->h2g);
- guc_ctb_snapshot_free(&snapshot->g2h);
+ kfree(snapshot->ctb);
kfree(snapshot);
}
@@ -1580,16 +1760,121 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
* xe_guc_ct_print - GuC CT Print.
* @ct: GuC CT.
* @p: drm_printer where it will be printed out.
- * @atomic: Boolean to indicate if this is called from atomic context like
- * reset or CTB handler or from some regular path like debugfs.
+ * @want_ctb: Should the full CTB content be dumped (vs just the headers)
*
- * This function quickly capture a snapshot and immediately print it out.
+ * This function will quickly capture a snapshot of the CT state
+ * and immediately print it out.
*/
-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
{
struct xe_guc_ct_snapshot *snapshot;
- snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
+ snapshot = guc_ct_snapshot_capture(ct, false, want_ctb);
xe_guc_ct_snapshot_print(snapshot, p);
xe_guc_ct_snapshot_free(snapshot);
}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
+{
+ struct xe_guc_log_snapshot *snapshot_log;
+ struct xe_guc_ct_snapshot *snapshot_ct;
+ struct xe_guc *guc = ct_to_guc(ct);
+ unsigned long flags;
+ bool have_capture;
+
+ if (ctb)
+ ctb->info.broken = true;
+
+ /* Ignore further errors after the first dump until a reset */
+ if (ct->dead.reported)
+ return;
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ /* And only capture one dump at a time */
+ have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE);
+ ct->dead.reason |= (1 << reason_code) |
+ (1 << CT_DEAD_STATE_CAPTURE);
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ if (have_capture)
+ return;
+
+ snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true);
+ snapshot_ct = xe_guc_ct_snapshot_capture(ct);
+
+ spin_lock_irqsave(&ct->dead.lock, flags);
+
+ if (ct->dead.snapshot_log || ct->dead.snapshot_ct) {
+ xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n");
+ xe_guc_log_snapshot_free(snapshot_log);
+ xe_guc_ct_snapshot_free(snapshot_ct);
+ } else {
+ ct->dead.snapshot_log = snapshot_log;
+ ct->dead.snapshot_ct = snapshot_ct;
+ }
+
+ spin_unlock_irqrestore(&ct->dead.lock, flags);
+
+ queue_work(system_unbound_wq, &ct->dead.worker);
+}
+
+static void ct_dead_print(struct xe_dead_ct *dead)
+{
+ struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead);
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ static int g_count;
+ struct drm_printer ip = xe_gt_info_printer(gt);
+ struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+ if (!dead->reason) {
+ xe_gt_err(gt, "CTB is dead for no reason!?\n");
+ return;
+ }
+
+ /* Can't generate a genuine core dump at this point, so just do the good bits */
+ drm_puts(&lp, "**** Xe Device Coredump ****\n");
+ drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
+ xe_device_snapshot_print(xe, &lp);
+
+ drm_printf(&lp, "**** GT #%d ****\n", gt->info.id);
+ drm_printf(&lp, "\tTile: %d\n", gt->tile->id);
+
+ drm_puts(&lp, "**** GuC Log ****\n");
+ xe_guc_log_snapshot_print(dead->snapshot_log, &lp);
+
+ drm_puts(&lp, "**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp);
+
+ drm_puts(&lp, "Done.\n");
+}
+
+static void ct_dead_worker_func(struct work_struct *w)
+{
+ struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker);
+
+ if (!ct->dead.reported) {
+ ct->dead.reported = true;
+ ct_dead_print(&ct->dead);
+ }
+
+ spin_lock_irq(&ct->dead.lock);
+
+ xe_guc_log_snapshot_free(ct->dead.snapshot_log);
+ ct->dead.snapshot_log = NULL;
+ xe_guc_ct_snapshot_free(ct->dead.snapshot_ct);
+ ct->dead.snapshot_ct = NULL;
+
+ if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) {
+ /* A reset has occurred so re-arm the error reporting */
+ ct->dead.reason = 0;
+ ct->dead.reported = false;
+ }
+
+ spin_unlock_irq(&ct->dead.lock);
+}
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 190202fce2d0..582aac106469 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -9,6 +9,7 @@
#include "xe_guc_ct_types.h"
struct drm_printer;
+struct xe_device;
int xe_guc_ct_init(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
@@ -16,12 +17,15 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
-struct xe_guc_ct_snapshot *
-xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic);
-void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
- struct drm_printer *p);
+struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct);
+void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_printer *p);
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
-void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic);
+void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
+
+static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
+{
+ return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
+}
static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 761cb9031298..8e1b9d981d61 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -52,8 +52,6 @@ struct guc_ctb {
struct guc_ctb_snapshot {
/** @desc: snapshot of the CTB descriptor */
struct guc_ct_buffer_desc desc;
- /** @cmds: snapshot of the CTB commands */
- u32 *cmds;
/** @info: snapshot of the CTB info */
struct guc_ctb_info info;
};
@@ -70,6 +68,10 @@ struct xe_guc_ct_snapshot {
struct guc_ctb_snapshot g2h;
/** @h2g: H2G CTB snapshot */
struct guc_ctb_snapshot h2g;
+ /** @ctb_size: size of the snapshot of the CTB */
+ size_t ctb_size;
+ /** @ctb: snapshot of the entire CTB */
+ u32 *ctb;
};
/**
@@ -86,6 +88,24 @@ enum xe_guc_ct_state {
XE_GUC_CT_STATE_ENABLED,
};
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+/** struct xe_dead_ct - Information for debugging a dead CT */
+struct xe_dead_ct {
+ /** @lock: protects memory allocation/free operations, and @reason updates */
+ spinlock_t lock;
+ /** @reason: bit mask of CT_DEAD_* reason codes */
+ unsigned int reason;
+ /** @reported: for preventing multiple dumps per error sequence */
+ bool reported;
+ /** @worker: work item used to get out of interrupt context before dumping */
+ struct work_struct worker;
+ /** @snapshot_ct: copy of CT state and CTB content at point of error */
+ struct xe_guc_ct_snapshot *snapshot_ct;
+ /** @snapshot_log: copy of GuC log at point of error */
+ struct xe_guc_log_snapshot *snapshot_log;
+};
+#endif
+
/**
* struct xe_guc_ct - GuC command transport (CT) layer
*
@@ -128,6 +148,11 @@ struct xe_guc_ct {
u32 msg[GUC_CTB_MSG_MAX_LEN];
/** @fast_msg: Message buffer */
u32 fast_msg[GUC_CTB_MSG_MAX_LEN];
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ /** @dead: information for debugging dead CTs */
+ struct xe_dead_ct dead;
+#endif
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_debugfs.c b/drivers/gpu/drm/xe/xe_guc_debugfs.c
index d3822cbea273..0b102ab46c4d 100644
--- a/drivers/gpu/drm/xe/xe_guc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_guc_debugfs.c
@@ -13,63 +13,134 @@
#include "xe_guc.h"
#include "xe_guc_ct.h"
#include "xe_guc_log.h"
+#include "xe_guc_pc.h"
#include "xe_macros.h"
#include "xe_pm.h"
-static struct xe_guc *node_to_guc(struct drm_info_node *node)
-{
- return node->info_ent->data;
-}
-
-static int guc_info(struct seq_file *m, void *data)
+/*
+ * guc_debugfs_show - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_guc specific, in a similar way to how we handle
+ * &xe_gt specific files using &xe_gt_debugfs_simple_show.
+ *
+ * It is assumed that those debugfs files will be created on a directory entry
+ * whose grandparent struct dentry's d_inode->i_private points to &xe_gt.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── gt0 # dent->d_parent->d_parent (d_inode->i_private == gt)
+ * │   ├── uc # dent->d_parent
+ * │   │   ├── guc_info # dent
+ * │   │   ├── guc_...
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_guc *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int foo(struct xe_guc *guc, struct drm_printer *p)
+ * {
+ * drm_printf(p, "enabled %d\n", guc->submission_state.enabled);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list bar[] = {
+ * { name = "foo", .show = guc_debugfs_show, .data = foo },
+ * };
+ *
+ * parent = debugfs_create_dir("uc", gtdir);
+ * drm_debugfs_create_files(bar, ARRAY_SIZE(bar), parent, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int guc_debugfs_show(struct seq_file *m, void *data)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct dentry *grandparent = parent->d_parent;
+ struct xe_gt *gt = grandparent->d_inode->i_private;
+ struct xe_device *xe = gt_to_xe(gt);
+ int (*print)(struct xe_guc *, struct drm_printer *) = node->info_ent->data;
+ int ret;
xe_pm_runtime_get(xe);
- xe_guc_print_info(guc, &p);
+ ret = print(&gt->uc.guc, &p);
xe_pm_runtime_put(xe);
+ return ret;
+}
+
+static int guc_log(struct xe_guc *guc, struct drm_printer *p)
+{
+ xe_guc_log_print(&guc->log, p);
return 0;
}
-static int guc_log(struct seq_file *m, void *data)
+static int guc_log_dmesg(struct xe_guc *guc, struct drm_printer *p)
{
- struct xe_guc *guc = node_to_guc(m->private);
- struct xe_device *xe = guc_to_xe(guc);
- struct drm_printer p = drm_seq_file_printer(m);
+ xe_guc_log_print_dmesg(&guc->log);
+ return 0;
+}
- xe_pm_runtime_get(xe);
- xe_guc_log_print(&guc->log, &p);
- xe_pm_runtime_put(xe);
+static int guc_ctb(struct xe_guc *guc, struct drm_printer *p)
+{
+ xe_guc_ct_print(&guc->ct, p, true);
+ return 0;
+}
+static int guc_pc(struct xe_guc *guc, struct drm_printer *p)
+{
+ xe_guc_pc_print(&guc->pc, p);
return 0;
}
-static const struct drm_info_list debugfs_list[] = {
- {"guc_info", guc_info, 0},
- {"guc_log", guc_log, 0},
+/*
+ * only for GuC debugfs files which can be safely used on the VF as well:
+ * - without access to the GuC privileged registers
+ * - without access to the PF specific GuC objects
+ */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "guc_info", .show = guc_debugfs_show, .data = xe_guc_print_info },
+ { "guc_ctb", .show = guc_debugfs_show, .data = guc_ctb },
+};
+
+/* For GuC debugfs files that require the SLPC support */
+static const struct drm_info_list slpc_debugfs_list[] = {
+ { "guc_pc", .show = guc_debugfs_show, .data = guc_pc },
+};
+
+/* everything else should be added here */
+static const struct drm_info_list pf_only_debugfs_list[] = {
+ { "guc_log", .show = guc_debugfs_show, .data = guc_log },
+ { "guc_log_dmesg", .show = guc_debugfs_show, .data = guc_log_dmesg },
};
void xe_guc_debugfs_register(struct xe_guc *guc, struct dentry *parent)
{
- struct drm_minor *minor = guc_to_xe(guc)->drm.primary;
- struct drm_info_list *local;
- int i;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct drm_minor *minor = xe->drm.primary;
-#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
- local = drmm_kmalloc(&guc_to_xe(guc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
- if (!local)
- return;
-
- memcpy(local, debugfs_list, DEBUGFS_SIZE);
-#undef DEBUGFS_SIZE
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ parent, minor);
- for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
- local[i].data = guc;
+ if (!IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(pf_only_debugfs_list,
+ ARRAY_SIZE(pf_only_debugfs_list),
+ parent, minor);
- drm_debugfs_create_files(local,
- ARRAY_SIZE(debugfs_list),
- parent, minor);
+ if (!xe->info.skip_guc_pc)
+ drm_debugfs_create_files(slpc_debugfs_list,
+ ARRAY_SIZE(slpc_debugfs_list),
+ parent, minor);
+ }
}
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
new file mode 100644
index 000000000000..0fb48f8f05d8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "regs/xe_gt_regs.h"
+
+#include "xe_bo.h"
+#include "xe_force_wake.h"
+#include "xe_gt_printk.h"
+#include "xe_guc.h"
+#include "xe_guc_engine_activity.h"
+#include "xe_guc_ct.h"
+#include "xe_hw_engine.h"
+#include "xe_map.h"
+#include "xe_mmio.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_trace_guc.h"
+
+#define TOTAL_QUANTA 0x8000
+
+static struct iosys_map engine_activity_map(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer;
+ u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
+ size_t offset;
+
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_data) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ offset += offsetof(struct guc_engine_activity_data,
+ engine_activity[guc_class][hwe->logical_instance]);
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->activity_bo->vmap, offset);
+}
+
+static struct iosys_map engine_metadata_map(struct xe_guc *guc,
+ unsigned int index)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer;
+ size_t offset;
+
+ if (engine_activity->num_functions) {
+ buffer = &engine_activity->function_buffer;
+ offset = sizeof(struct guc_engine_activity_metadata) * index;
+ } else {
+ buffer = &engine_activity->device_buffer;
+ offset = 0;
+ }
+
+ return IOSYS_MAP_INIT_OFFSET(&buffer->metadata_bo->vmap, offset);
+}
+
+static int allocate_engine_activity_group(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct xe_device *xe = guc_to_xe(guc);
+ u32 num_activity_group;
+
+ /*
+ * An additional activity group is allocated for PF
+ */
+ num_activity_group = IS_SRIOV_PF(xe) ? xe_sriov_pf_get_totalvfs(xe) + 1 : 1;
+
+ engine_activity->eag = drmm_kcalloc(&xe->drm, num_activity_group,
+ sizeof(struct engine_activity_group), GFP_KERNEL);
+
+ if (!engine_activity->eag)
+ return -ENOMEM;
+
+ engine_activity->num_activity_group = num_activity_group;
+
+ return 0;
+}
+
+static int allocate_engine_activity_buffers(struct xe_guc *guc,
+ struct engine_activity_buffer *buffer,
+ int count)
+{
+ u32 metadata_size = sizeof(struct guc_engine_activity_metadata) * count;
+ u32 size = sizeof(struct guc_engine_activity_data) * count;
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_bo *bo, *metadata_bo;
+
+ metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
+ ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+
+ if (IS_ERR(metadata_bo))
+ return PTR_ERR(metadata_bo);
+
+ bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
+ ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+
+ if (IS_ERR(bo)) {
+ xe_bo_unpin_map_no_vm(metadata_bo);
+ return PTR_ERR(bo);
+ }
+
+ buffer->metadata_bo = metadata_bo;
+ buffer->activity_bo = bo;
+ return 0;
+}
+
+static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
+{
+ xe_bo_unpin_map_no_vm(buffer->metadata_bo);
+ xe_bo_unpin_map_no_vm(buffer->activity_bo);
+}
+
+static bool is_engine_activity_supported(struct xe_guc *guc)
+{
+ struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_uc_fw_version required = { 1, 14, 1 };
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ xe_gt_info(gt, "engine activity stats not supported on VFs\n");
+ return false;
+ }
+
+ /* engine activity stats are supported from GuC interface version 1.14.1 */
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER_STRUCT(required)) {
+ xe_gt_info(gt,
+ "engine activity stats unsupported in GuC interface v%u.%u.%u, need v%u.%u.%u or higher\n",
+ version->major, version->minor, version->patch, required.major,
+ required.minor, required.patch);
+ return false;
+ }
+
+ return true;
+}
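
The version check above works because GUC_SUBMIT_VER() and MAKE_GUC_VER_STRUCT() reduce a major/minor/patch triple to a single comparable integer. Their exact definitions are not part of this hunk, so the packing below is only an assumed illustration of the usual encoding, checked standalone:

#include <stdint.h>
#include <stdio.h>

#define PACK_VER(maj, min, pat) (((uint32_t)(maj) << 16) | ((min) << 8) | (pat))

int main(void)
{
	/* 1.13.7 is older than the required 1.14.1, 1.14.2 is newer */
	printf("%d\n", PACK_VER(1, 13, 7) < PACK_VER(1, 14, 1));	/* 1 */
	printf("%d\n", PACK_VER(1, 14, 2) < PACK_VER(1, 14, 1));	/* 0 */
	return 0;
}
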
+
+static struct engine_activity *hw_engine_to_engine_activity(struct xe_hw_engine *hwe,
+ unsigned int index)
+{
+ struct xe_guc *guc = &hwe->gt->uc.guc;
+ struct engine_activity_group *eag = &guc->engine_activity.eag[index];
+ u16 guc_class = xe_engine_class_to_guc_class(hwe->class);
+
+ return &eag->engine[guc_class][hwe->logical_instance];
+}
+
+static u64 cpu_ns_to_guc_tsc_tick(ktime_t ns, u32 freq)
+{
+ return mul_u64_u32_div(ns, freq, NSEC_PER_SEC);
+}
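
cpu_ns_to_guc_tsc_tick() scales wall-clock nanoseconds into GuC TSC ticks. A standalone equivalent of mul_u64_u32_div(ns, freq, NSEC_PER_SEC) is shown below; the 19.2 MHz frequency is an example value only, not a statement about any particular platform.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t ns_to_guc_tsc_tick(uint64_t ns, uint32_t freq_hz)
{
	/* 128-bit intermediate avoids overflow, like mul_u64_u32_div() */
	return (uint64_t)((unsigned __int128)ns * freq_hz / NSEC_PER_SEC);
}

int main(void)
{
	/* 1 ms at 19,200,000 Hz -> 19200 ticks */
	printf("%llu\n", (unsigned long long)ns_to_guc_tsc_tick(1000000, 19200000));
	return 0;
}
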
+
+#define read_engine_activity_record(xe_, map_, field_) \
+ xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity, field_)
+
+#define read_metadata_record(xe_, map_, field_) \
+ xe_map_rd_field(xe_, map_, 0, struct guc_engine_activity_metadata, field_)
+
+static u64 get_engine_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int index)
+{
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
+ struct guc_engine_activity *cached_activity = &ea->activity;
+ struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct iosys_map activity_map, metadata_map;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 last_update_tick, global_change_num;
+ u64 active_ticks, gpm_ts;
+ u16 change_num;
+
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
+ global_change_num = read_metadata_record(xe, &metadata_map, global_change_num);
+
+ /* GuC has not initialized activity data yet, return 0 */
+ if (!global_change_num)
+ goto update;
+
+ if (global_change_num == cached_metadata->global_change_num)
+ goto update;
+
+ cached_metadata->global_change_num = global_change_num;
+ change_num = read_engine_activity_record(xe, &activity_map, change_num);
+
+ if (!change_num || change_num == cached_activity->change_num)
+ goto update;
+
+ /* read engine activity values */
+ last_update_tick = read_engine_activity_record(xe, &activity_map, last_update_tick);
+ active_ticks = read_engine_activity_record(xe, &activity_map, active_ticks);
+
+ /* activity calculations */
+ ea->running = !!last_update_tick;
+ ea->total += active_ticks - cached_activity->active_ticks;
+ ea->active = 0;
+
+ /* cache the counter */
+ cached_activity->change_num = change_num;
+ cached_activity->last_update_tick = last_update_tick;
+ cached_activity->active_ticks = active_ticks;
+
+update:
+ if (ea->running) {
+ gpm_ts = xe_mmio_read64_2x32(&gt->mmio, MISC_STATUS_0) >>
+ engine_activity->gpm_timestamp_shift;
+ ea->active = lower_32_bits(gpm_ts) - cached_activity->last_update_tick;
+ }
+
+ trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);
+
+ return ea->total + ea->active;
+}
+
+static u64 get_engine_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe, unsigned int index)
+{
+ struct engine_activity *ea = hw_engine_to_engine_activity(hwe, index);
+ struct guc_engine_activity_metadata *cached_metadata = &ea->metadata;
+ struct guc_engine_activity *cached_activity = &ea->activity;
+ struct iosys_map activity_map, metadata_map;
+ struct xe_device *xe = guc_to_xe(guc);
+ ktime_t now, cpu_delta;
+ u64 numerator;
+ u16 quanta_ratio;
+
+ activity_map = engine_activity_map(guc, hwe, index);
+ metadata_map = engine_metadata_map(guc, index);
+
+ if (!cached_metadata->guc_tsc_frequency_hz)
+ cached_metadata->guc_tsc_frequency_hz = read_metadata_record(xe, &metadata_map,
+ guc_tsc_frequency_hz);
+
+ quanta_ratio = read_engine_activity_record(xe, &activity_map, quanta_ratio);
+ cached_activity->quanta_ratio = quanta_ratio;
+
+ /* Total ticks calculations */
+ now = ktime_get();
+ cpu_delta = now - ea->last_cpu_ts;
+ ea->last_cpu_ts = now;
+ numerator = (ea->quanta_remainder_ns + cpu_delta) * cached_activity->quanta_ratio;
+ ea->quanta_ns += numerator / TOTAL_QUANTA;
+ ea->quanta_remainder_ns = numerator % TOTAL_QUANTA;
+ ea->quanta = cpu_ns_to_guc_tsc_tick(ea->quanta_ns, cached_metadata->guc_tsc_frequency_hz);
+
+ trace_xe_guc_engine_activity(xe, ea, hwe->name, hwe->instance);
+
+ return ea->quanta;
+}
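
The total-ticks path above scales the CPU delta by quanta_ratio / TOTAL_QUANTA and carries the division remainder into the next sample. A standalone check of one step of that arithmetic, with a made-up ratio, mirrors what ea->quanta_ns and ea->quanta_remainder_ns accumulate:

#include <stdint.h>
#include <stdio.h>

#define TOTAL_QUANTA 0x8000

int main(void)
{
	uint64_t quanta_ns = 0, remainder = 0;
	uint64_t cpu_delta = 1000001;		/* ns since the last sample */
	uint16_t quanta_ratio = 0x4000;		/* half of TOTAL_QUANTA */

	uint64_t numerator = (remainder + cpu_delta) * quanta_ratio;

	quanta_ns += numerator / TOTAL_QUANTA;
	remainder = numerator % TOTAL_QUANTA;

	printf("%llu %llu\n", (unsigned long long)quanta_ns,
	       (unsigned long long)remainder);	/* prints: 500000 16384 */
	return 0;
}
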
+
+static int enable_engine_activity_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+ u32 action[] = {
+ XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER,
+ xe_bo_ggtt_addr(buffer->metadata_bo),
+ 0,
+ xe_bo_ggtt_addr(buffer->activity_bo),
+ 0,
+ };
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static int enable_function_engine_activity_stats(struct xe_guc *guc, bool enable)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ u32 metadata_ggtt_addr = 0, ggtt_addr = 0, num_functions = 0;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ u32 action[6];
+ int len = 0;
+
+ if (enable) {
+ metadata_ggtt_addr = xe_bo_ggtt_addr(buffer->metadata_bo);
+ ggtt_addr = xe_bo_ggtt_addr(buffer->activity_bo);
+ num_functions = engine_activity->num_functions;
+ }
+
+ action[len++] = XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER;
+ action[len++] = num_functions;
+ action[len++] = metadata_ggtt_addr;
+ action[len++] = 0;
+ action[len++] = ggtt_addr;
+ action[len++] = 0;
+
+ /* Blocking here to ensure the buffers are ready before reading them */
+ return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
+}
+
+static void engine_activity_set_cpu_ts(struct xe_guc *guc, unsigned int index)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_group *eag = &engine_activity->eag[index];
+ int i, j;
+
+ xe_gt_assert(guc_to_gt(guc), index < engine_activity->num_activity_group);
+
+ for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++)
+ for (j = 0; j < GUC_MAX_INSTANCES_PER_CLASS; j++)
+ eag->engine[i][j].last_cpu_ts = ktime_get();
+}
+
+static u32 gpm_timestamp_shift(struct xe_gt *gt)
+{
+ u32 reg;
+
+ reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
+
+ return 3 - REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
+}
+
+static bool is_function_valid(struct xe_guc *guc, unsigned int fn_id)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ if (!IS_SRIOV_PF(xe) && fn_id)
+ return false;
+
+ if (engine_activity->num_functions && fn_id >= engine_activity->num_functions)
+ return false;
+
+ return true;
+}
+
+static int engine_activity_disable_function_stats(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret;
+
+ if (!engine_activity->num_functions)
+ return 0;
+
+ ret = enable_function_engine_activity_stats(guc, false);
+ if (ret)
+ return ret;
+
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+
+ return 0;
+}
+
+static int engine_activity_enable_function_stats(struct xe_guc *guc, int num_vfs)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct engine_activity_buffer *buffer = &engine_activity->function_buffer;
+ int ret, i;
+
+ if (!num_vfs)
+ return 0;
+
+ /* This includes 1 PF and num_vfs */
+ engine_activity->num_functions = num_vfs + 1;
+
+ ret = allocate_engine_activity_buffers(guc, buffer, engine_activity->num_functions);
+ if (ret)
+ return ret;
+
+ ret = enable_function_engine_activity_stats(guc, true);
+ if (ret) {
+ free_engine_activity_buffers(buffer);
+ engine_activity->num_functions = 0;
+ return ret;
+ }
+
+ /* skip the PF as it was already set up */
+ for (i = 1; i < engine_activity->num_functions; i++)
+ engine_activity_set_cpu_ts(guc, i);
+
+ return 0;
+}
+
+/**
+ * xe_guc_engine_activity_active_ticks - Get engine active ticks
+ * @guc: The GuC object
+ * @hwe: The hw_engine object
+ * @fn_id: function id to report on
+ *
+ * Return: accumulated ticks @hwe was active since engine activity stats were enabled.
+ */
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_active_ticks(guc, hwe, fn_id);
+}
+
+/**
+ * xe_guc_engine_activity_total_ticks - Get engine total ticks
+ * @guc: The GuC object
+ * @hwe: The hw_engine object
+ * @fn_id: function id to report on
+ *
+ * Return: accumulated quanta of ticks allocated for the engine
+ */
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ if (!is_function_valid(guc, fn_id))
+ return 0;
+
+ return get_engine_total_ticks(guc, hwe, fn_id);
+}
+
+/**
+ * xe_guc_engine_activity_supported - Check support for engine activity stats
+ * @guc: The GuC object
+ *
+ * Engine activity stats are supported from GuC interface version 1.14.1
+ *
+ * Return: true if engine activity stats supported, false otherwise
+ */
+bool xe_guc_engine_activity_supported(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+
+ return engine_activity->supported;
+}
+
+/**
+ * xe_guc_engine_activity_function_stats - Enable/Disable per-function engine activity stats
+ * @guc: The GuC object
+ * @num_vfs: number of vfs
+ * @enable: true to enable, false otherwise
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable)
+{
+ if (!xe_guc_engine_activity_supported(guc))
+ return 0;
+
+ if (enable)
+ return engine_activity_enable_function_stats(guc, num_vfs);
+
+ return engine_activity_disable_function_stats(guc);
+}
+
+/**
+ * xe_guc_engine_activity_enable_stats - Enable engine activity stats
+ * @guc: The GuC object
+ *
+ * Enable engine activity stats and set initial timestamps
+ */
+void xe_guc_engine_activity_enable_stats(struct xe_guc *guc)
+{
+ int ret;
+
+ if (!xe_guc_engine_activity_supported(guc))
+ return;
+
+ ret = enable_engine_activity_stats(guc);
+ if (ret)
+ xe_gt_err(guc_to_gt(guc), "failed to enable activity stats%d\n", ret);
+ else
+ engine_activity_set_cpu_ts(guc, 0);
+}
+
+static void engine_activity_fini(void *arg)
+{
+ struct xe_guc_engine_activity *engine_activity = arg;
+ struct engine_activity_buffer *buffer = &engine_activity->device_buffer;
+
+ free_engine_activity_buffers(buffer);
+}
+
+/**
+ * xe_guc_engine_activity_init - Initialize the engine activity data
+ * @guc: The GuC object
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_guc_engine_activity_init(struct xe_guc *guc)
+{
+ struct xe_guc_engine_activity *engine_activity = &guc->engine_activity;
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ engine_activity->supported = is_engine_activity_supported(guc);
+ if (!engine_activity->supported)
+ return 0;
+
+ ret = allocate_engine_activity_group(guc);
+ if (ret) {
+ xe_gt_err(gt, "failed to allocate engine activity group (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = allocate_engine_activity_buffers(guc, &engine_activity->device_buffer, 1);
+ if (ret) {
+ xe_gt_err(gt, "failed to allocate engine activity buffers (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ engine_activity->gpm_timestamp_shift = gpm_timestamp_shift(gt);
+
+ return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, engine_activity_fini,
+ engine_activity);
+}
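
As a consumer-side illustration of the two getters exported above (this is not the driver's PMU code; example_engine_busy_percent() and the caller-provided prev_* bookkeeping are invented), busyness over a sampling window can be derived from the deltas of the two counters, since both are reported in the GuC timestamp domain:

#include <linux/math64.h>

#include "xe_guc_engine_activity.h"

static u64 example_engine_busy_percent(struct xe_guc *guc,
				       struct xe_hw_engine *hwe,
				       unsigned int fn_id,
				       u64 *prev_active, u64 *prev_total)
{
	u64 active = xe_guc_engine_activity_active_ticks(guc, hwe, fn_id);
	u64 total = xe_guc_engine_activity_total_ticks(guc, hwe, fn_id);
	u64 d_active = active - *prev_active;
	u64 d_total = total - *prev_total;

	*prev_active = active;
	*prev_total = total;

	return d_total ? div64_u64(d_active * 100, d_total) : 0;
}
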
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.h b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
new file mode 100644
index 000000000000..b32926c2d208
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ENGINE_ACTIVITY_H_
+#define _XE_GUC_ENGINE_ACTIVITY_H_
+
+#include <linux/types.h>
+
+struct xe_hw_engine;
+struct xe_guc;
+
+int xe_guc_engine_activity_init(struct xe_guc *guc);
+bool xe_guc_engine_activity_supported(struct xe_guc *guc);
+void xe_guc_engine_activity_enable_stats(struct xe_guc *guc);
+int xe_guc_engine_activity_function_stats(struct xe_guc *guc, int num_vfs, bool enable);
+u64 xe_guc_engine_activity_active_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
+u64 xe_guc_engine_activity_total_ticks(struct xe_guc *guc, struct xe_hw_engine *hwe,
+ unsigned int fn_id);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
new file mode 100644
index 000000000000..48f69ddefa36
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity_types.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_ENGINE_ACTIVITY_TYPES_H_
+#define _XE_GUC_ENGINE_ACTIVITY_TYPES_H_
+
+#include <linux/types.h>
+
+#include "xe_guc_fwif.h"
+/**
+ * struct engine_activity - Engine specific activity data
+ *
+ * Contains engine specific activity data and snapshot of the
+ * structures from GuC
+ */
+struct engine_activity {
+ /** @active: current activity */
+ u64 active;
+
+ /** @last_cpu_ts: cpu timestamp in nsec of previous sample */
+ u64 last_cpu_ts;
+
+ /** @quanta: total quanta used on HW */
+ u64 quanta;
+
+ /** @quanta_ns: total quanta_ns used on HW */
+ u64 quanta_ns;
+
+ /**
+ * @quanta_remainder_ns: remainder when the CPU time is scaled as
+ * per the quanta_ratio. This remainder is used in subsequent
+ * quanta calculations.
+ */
+ u64 quanta_remainder_ns;
+
+ /** @total: total engine activity */
+ u64 total;
+
+ /** @running: true if engine is running some work */
+ bool running;
+
+ /** @metadata: snapshot of engine activity metadata */
+ struct guc_engine_activity_metadata metadata;
+
+ /** @activity: snapshot of engine activity counter */
+ struct guc_engine_activity activity;
+};
+
+/**
+ * struct engine_activity_group - Activity data for all engines
+ */
+struct engine_activity_group {
+ /** @engine: engine specific activity data */
+ struct engine_activity engine[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+};
+
+/**
+ * struct engine_activity_buffer - engine activity buffers
+ *
+ * This contains the buffers allocated for metadata and activity data
+ */
+struct engine_activity_buffer {
+ /** @activity_bo: object allocated to hold activity data */
+ struct xe_bo *activity_bo;
+
+ /** @metadata_bo: object allocated to hold activity metadata */
+ struct xe_bo *metadata_bo;
+};
+
+/**
+ * struct xe_guc_engine_activity - Data used by engine activity implementation
+ */
+struct xe_guc_engine_activity {
+ /** @gpm_timestamp_shift: Right shift value for the gpm timestamp */
+ u32 gpm_timestamp_shift;
+
+ /** @num_activity_group: number of activity groups */
+ u32 num_activity_group;
+
+ /** @num_functions: number of functions */
+ u32 num_functions;
+
+ /** @supported: indicates support for engine activity stats */
+ bool supported;
+
+ /**
+ * @eag: holds the device level engine activity data in native mode.
+ * In SRIOV mode, it points to an array whose entries hold the engine
+ * activity data for the PF and the VFs
+ */
+ struct engine_activity_group *eag;
+
+ /** @device_buffer: buffer object for global engine activity */
+ struct engine_activity_buffer device_buffer;
+
+ /** @function_buffer: buffer object for per-function engine activity */
+ struct engine_activity_buffer function_buffer;
+};
+#endif
+
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 19ee71aeaf17..6f57578b07cb 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -8,13 +8,16 @@
#include <linux/bits.h>
+#include "abi/guc_capture_abi.h"
#include "abi/guc_klvs_abi.h"
+#include "xe_hw_engine_types.h"
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4
#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
#define G2H_LEN_DW_TLB_INVALIDATE 3
#define GUC_ID_MAX 65535
+#define GUC_ID_UNKNOWN 0xffffffff
#define GUC_CONTEXT_DISABLE 0
#define GUC_CONTEXT_ENABLE 1
@@ -103,6 +106,7 @@ struct guc_update_exec_queue_policy {
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
+#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
@@ -157,24 +161,6 @@ struct guc_policies {
u32 reserved[4];
} __packed;
-/* GuC MMIO reg state struct */
-struct guc_mmio_reg {
- u32 offset;
- u32 value;
- u32 flags;
- u32 mask;
-#define GUC_REGSET_MASKED BIT(0)
-#define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
-#define GUC_REGSET_RESTORE_ONLY BIT(3)
-} __packed;
-
-/* GuC register sets */
-struct guc_mmio_reg_set {
- u32 address;
- u16 count;
- u16 reserved;
-} __packed;
-
/* Generic GT SysInfo data types */
#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED 0
#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK 1
@@ -188,12 +174,6 @@ struct guc_gt_system_info {
u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
} __packed;
-enum {
- GUC_CAPTURE_LIST_INDEX_PF = 0,
- GUC_CAPTURE_LIST_INDEX_VF = 1,
- GUC_CAPTURE_LIST_INDEX_MAX = 2,
-};
-
/* GuC Additional Data Struct */
struct guc_ads {
struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
@@ -228,6 +208,25 @@ struct guc_engine_usage {
struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
} __packed;
+/* Engine Activity stats */
+struct guc_engine_activity {
+ u16 change_num;
+ u16 quanta_ratio;
+ u32 last_update_tick;
+ u64 active_ticks;
+} __packed;
+
+struct guc_engine_activity_data {
+ struct guc_engine_activity engine_activity[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
+} __packed;
+
+struct guc_engine_activity_metadata {
+ u32 guc_tsc_frequency_hz;
+ u32 lag_latency_usec;
+ u32 global_change_num;
+ u32 reserved;
+} __packed;
+
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
enum xe_guc_recv_message {
XE_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
index 9d99fe266d97..146a6eda9e06 100644
--- a/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
+++ b/drivers/gpu/drm/xe/xe_guc_klv_helpers.c
@@ -49,6 +49,8 @@ const char *xe_guc_klv_key_to_string(u16 key)
return "begin_db_id";
case GUC_KLV_VF_CFG_BEGIN_CONTEXT_ID_KEY:
return "begin_ctx_id";
+ case GUC_KLV_VF_CFG_SCHED_PRIORITY_KEY:
+ return "sched_priority";
/* VF CFG threshold keys */
#define define_threshold_key_to_string_case(TAG, NAME, ...) \
diff --git a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h
index da0fedbbdbaf..da10cf0389cb 100644
--- a/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h
+++ b/drivers/gpu/drm/xe/xe_guc_klv_thresholds_set.h
@@ -18,6 +18,13 @@
MAKE_GUC_KLV_KEY(CONCATENATE(VF_CFG_THRESHOLD_, TAG))
/**
+ * MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN - Prepare the name of the KLV length constant.
+ * @TAG: unique tag of the GuC threshold KLV key.
+ */
+#define MAKE_GUC_KLV_VF_CFG_THRESHOLD_LEN(TAG) \
+ MAKE_GUC_KLV_LEN(CONCATENATE(VF_CFG_THRESHOLD_, TAG))
+
+/**
* xe_guc_klv_threshold_key_to_index - Find index of the tracked GuC threshold.
* @key: GuC threshold KLV key.
*
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index a37ee3419428..38039c411387 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -5,13 +5,26 @@
#include "xe_guc_log.h"
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
+#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
+#include "xe_devcoredump.h"
+#include "xe_force_wake.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_map.h"
+#include "xe_mmio.h"
#include "xe_module.h"
+static struct xe_guc *
+log_to_guc(struct xe_guc_log *log)
+{
+ return container_of(log, struct xe_guc, log);
+}
+
static struct xe_gt *
log_to_gt(struct xe_guc_log *log)
{
@@ -49,34 +62,195 @@ static size_t guc_log_size(void)
CAPTURE_BUFFER_SIZE;
}
-void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
+#define GUC_LOG_CHUNK_SIZE SZ_2M
+
+static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log *log, bool atomic)
{
+ struct xe_guc_log_snapshot *snapshot;
+ size_t remain;
+ int i;
+
+ snapshot = kzalloc(sizeof(*snapshot), atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot)
+ return NULL;
+
+ /*
+ * NB: kmalloc has a hard limit well below the maximum GuC log buffer size.
+ * Also, can't use vmalloc as might be called from atomic context. So need
+ * to break the buffer up into smaller chunks that can be allocated.
+ */
+ snapshot->size = log->bo->size;
+ snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE);
+
+ snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy),
+ atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot->copy)
+ goto fail_snap;
+
+ remain = snapshot->size;
+ for (i = 0; i < snapshot->num_chunks; i++) {
+ size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+
+ snapshot->copy[i] = kmalloc(size, atomic ? GFP_ATOMIC : GFP_KERNEL);
+ if (!snapshot->copy[i])
+ goto fail_copy;
+ remain -= size;
+ }
+
+ return snapshot;
+
+fail_copy:
+ for (i = 0; i < snapshot->num_chunks; i++)
+ kfree(snapshot->copy[i]);
+ kfree(snapshot->copy);
+fail_snap:
+ kfree(snapshot);
+ return NULL;
+}
+
+/**
+ * xe_guc_log_snapshot_free - free a previously captured GuC log snapshot
+ * @snapshot: GuC log snapshot structure
+ *
+ * Free the chunked log copies and the snapshot object itself. Passing a
+ * NULL snapshot is a no-op.
+ */
+void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot)
+{
+ int i;
+
+ if (!snapshot)
+ return;
+
+ if (snapshot->copy) {
+ for (i = 0; i < snapshot->num_chunks; i++)
+ kfree(snapshot->copy[i]);
+ kfree(snapshot->copy);
+ }
+
+ kfree(snapshot);
+}
+
+/**
+ * xe_guc_log_snapshot_capture - create a new snapshot copy the GuC log for later dumping
+ * @log: GuC log structure
+ * @atomic: is the call inside an atomic section of some kind?
+ *
+ * Return: pointer to a newly allocated snapshot object or null if out of memory. Caller is
+ * responsible for calling xe_guc_log_snapshot_free when done with the snapshot.
+ */
+struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic)
+{
+ struct xe_guc_log_snapshot *snapshot;
struct xe_device *xe = log_to_xe(log);
- size_t size;
- int i, j;
+ struct xe_guc *guc = log_to_guc(log);
+ struct xe_gt *gt = log_to_gt(log);
+ unsigned int fw_ref;
+ size_t remain;
+ int i;
- xe_assert(xe, log->bo);
+ if (!log->bo)
+ return NULL;
- size = log->bo->size;
+ snapshot = xe_guc_log_snapshot_alloc(log, atomic);
+ if (!snapshot)
+ return NULL;
-#define DW_PER_READ 128
- xe_assert(xe, !(size % (DW_PER_READ * sizeof(u32))));
- for (i = 0; i < size / sizeof(u32); i += DW_PER_READ) {
- u32 read[DW_PER_READ];
+ remain = snapshot->size;
+ for (i = 0; i < snapshot->num_chunks; i++) {
+ size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
- xe_map_memcpy_from(xe, read, &log->bo->vmap, i * sizeof(u32),
- DW_PER_READ * sizeof(u32));
-#define DW_PER_PRINT 4
- for (j = 0; j < DW_PER_READ / DW_PER_PRINT; ++j) {
- u32 *print = read + j * DW_PER_PRINT;
+ xe_map_memcpy_from(xe, snapshot->copy[i], &log->bo->vmap,
+ i * GUC_LOG_CHUNK_SIZE, size);
+ remain -= size;
+ }
- drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n",
- *(print + 0), *(print + 1),
- *(print + 2), *(print + 3));
- }
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref) {
+ snapshot->stamp = ~0ULL;
+ } else {
+ snapshot->stamp = xe_mmio_read64_2x32(&gt->mmio, GUC_PMTIMESTAMP_LO);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+ snapshot->ktime = ktime_get_boottime_ns();
+ snapshot->level = log->level;
+ snapshot->ver_found = guc->fw.versions.found[XE_UC_FW_VER_RELEASE];
+ snapshot->ver_want = guc->fw.versions.wanted;
+ snapshot->path = guc->fw.path;
+
+ return snapshot;
+}
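
A sketch of an atomic-context caller of the function above; the real consumer in this series is the devcoredump path, and the example_* names here are invented. The point is the capture-now / print-later split enabled by the @atomic flag.

static struct xe_guc_log_snapshot *example_capture_from_reset(struct xe_guc_log *log)
{
	/* GFP_ATOMIC allocations, safe from a reset/IRQ context */
	return xe_guc_log_snapshot_capture(log, true);
}

static void example_dump_and_release(struct xe_guc_log_snapshot *snapshot,
				     struct drm_printer *p)
{
	/* later, from process context: print the saved copy, then release it */
	xe_guc_log_snapshot_print(snapshot, p);
	xe_guc_log_snapshot_free(snapshot);
}
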
+
+/**
+ * xe_guc_log_snapshot_print - dump a previously saved copy of the GuC log to some useful location
+ * @snapshot: a snapshot of the GuC log
+ * @p: the printer object to output to
+ */
+void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p)
+{
+ size_t remain;
+ int i;
+
+ if (!snapshot) {
+ drm_printf(p, "GuC log snapshot not allocated!\n");
+ return;
+ }
+
+ drm_printf(p, "GuC firmware: %s\n", snapshot->path);
+ drm_printf(p, "GuC version: %u.%u.%u (wanted %u.%u.%u)\n",
+ snapshot->ver_found.major, snapshot->ver_found.minor, snapshot->ver_found.patch,
+ snapshot->ver_want.major, snapshot->ver_want.minor, snapshot->ver_want.patch);
+ drm_printf(p, "Kernel timestamp: 0x%08llX [%llu]\n", snapshot->ktime, snapshot->ktime);
+ drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp);
+ drm_printf(p, "Log level: %u\n", snapshot->level);
+
+ drm_printf(p, "[LOG].length: 0x%zx\n", snapshot->size);
+ remain = snapshot->size;
+ for (i = 0; i < snapshot->num_chunks; i++) {
+ size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+ const char *prefix = i ? NULL : "[LOG].data";
+ char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
+
+ xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
+ remain -= size;
}
}
+/**
+ * xe_guc_log_print_dmesg - dump a copy of the GuC log to dmesg
+ * @log: GuC log structure
+ */
+void xe_guc_log_print_dmesg(struct xe_guc_log *log)
+{
+ struct xe_gt *gt = log_to_gt(log);
+ static int g_count;
+ struct drm_printer ip = xe_gt_info_printer(gt);
+ struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count);
+
+ drm_printf(&lp, "Dumping GuC log for %ps...\n", __builtin_return_address(0));
+
+ xe_guc_log_print(log, &lp);
+
+ drm_printf(&lp, "Done.\n");
+}
+
+/**
+ * xe_guc_log_print - dump a copy of the GuC log to some useful location
+ * @log: GuC log structure
+ * @p: the printer object to output to
+ */
+void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p)
+{
+ struct xe_guc_log_snapshot *snapshot;
+
+ drm_printf(p, "**** GuC Log ****\n");
+
+ snapshot = xe_guc_log_snapshot_capture(log, false);
+ drm_printf(p, "CS reference clock: %u\n", log_to_gt(log)->info.reference_clock);
+ xe_guc_log_snapshot_print(snapshot, p);
+ xe_guc_log_snapshot_free(snapshot);
+}
+
int xe_guc_log_init(struct xe_guc_log *log)
{
struct xe_device *xe = log_to_xe(log);
@@ -86,7 +260,8 @@ int xe_guc_log_init(struct xe_guc_log *log)
bo = xe_managed_bo_create_pin_map(xe, tile, guc_log_size(),
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -96,3 +271,105 @@ int xe_guc_log_init(struct xe_guc_log *log)
return 0;
}
+
+ALLOW_ERROR_INJECTION(xe_guc_log_init, ERRNO); /* See xe_pci_probe() */
+
+static u32 xe_guc_log_section_size_crash(struct xe_guc_log *log)
+{
+ return CRASH_BUFFER_SIZE;
+}
+
+static u32 xe_guc_log_section_size_debug(struct xe_guc_log *log)
+{
+ return DEBUG_BUFFER_SIZE;
+}
+
+/**
+ * xe_guc_log_section_size_capture - Get capture buffer size within log sections.
+ * @log: The log object.
+ *
+ * This function will return the capture buffer size within log sections.
+ *
+ * Return: capture buffer size.
+ */
+u32 xe_guc_log_section_size_capture(struct xe_guc_log *log)
+{
+ return CAPTURE_BUFFER_SIZE;
+}
+
+/**
+ * xe_guc_get_log_buffer_size - Get log buffer size for a type.
+ * @log: The log object.
+ * @type: The log buffer type
+ *
+ * Return: buffer size.
+ */
+u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_LOG_BUFFER_CRASH_DUMP:
+ return xe_guc_log_section_size_crash(log);
+ case GUC_LOG_BUFFER_DEBUG:
+ return xe_guc_log_section_size_debug(log);
+ case GUC_LOG_BUFFER_CAPTURE:
+ return xe_guc_log_section_size_capture(log);
+ }
+ return 0;
+}
+
+/**
+ * xe_guc_get_log_buffer_offset - Get offset in log buffer for a type.
+ * @log: The log object.
+ * @type: The log buffer type
+ *
+ * This function will return the offset in the log buffer for a type.
+ * Return: buffer offset.
+ */
+u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type)
+{
+ enum guc_log_buffer_type i;
+ u32 offset = PAGE_SIZE; /* for the log_buffer_states */
+
+ for (i = GUC_LOG_BUFFER_CRASH_DUMP; i < GUC_LOG_BUFFER_TYPE_MAX; ++i) {
+ if (i == type)
+ break;
+ offset += xe_guc_get_log_buffer_size(log, i);
+ }
+
+ return offset;
+}
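
A quick worked example of the offset computation above, assuming a 4 KiB PAGE_SIZE and the non-CONFIG_DRM_XE_LARGE_GUC_BUFFER defaults from this series (8K crash section, 64K debug section); sections are laid out in the enum order walked by the loop:

#include <stdio.h>

int main(void)
{
	unsigned int page = 4096, crash = 8192, debug = 65536;

	printf("crash dump offset: 0x%x\n", page);		/* 0x1000  */
	printf("debug log offset:  0x%x\n", page + crash);	/* 0x3000  */
	printf("capture offset:    0x%x\n", page + crash + debug); /* 0x13000 */
	return 0;
}
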
+
+/**
+ * xe_guc_check_log_buf_overflow - Check if log buffer overflowed
+ * @log: The log object.
+ * @type: The log buffer type
+ * @full_cnt: The count of buffer full
+ *
+ * This function compares the current buffer-full count against the previously
+ * sampled value; a mismatch indicates that the buffer overflowed since the
+ * last sample. It updates the sampled_overflow counter and, if the 4-bit
+ * hardware counter wrapped around, adds 16 to correct the value.
+ *
+ * Return: True if overflowed.
+ */
+bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log, enum guc_log_buffer_type type,
+ unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = log->stats[type].sampled_overflow;
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ log->stats[type].overflow = full_cnt;
+ log->stats[type].sampled_overflow += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ log->stats[type].sampled_overflow += 16;
+ }
+ xe_gt_notice(log_to_gt(log), "log buffer overflow\n");
+ }
+
+ return overflow;
+}
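
The wrap correction above leans on unsigned arithmetic. A standalone check with made-up sample values shows how a 4-bit full count going from 14 to 2 ends up counted as 4 additional overflows:

#include <stdio.h>

int main(void)
{
	unsigned int prev = 14, cur = 2, sampled = 14;

	sampled += cur - prev;		/* unsigned: adds (2 - 14) mod 2^32 */
	if (cur < prev)
		sampled += 16;		/* the 4-bit counter wrapped */

	printf("%u\n", sampled);	/* prints 18, i.e. 14 + 4 */
	return 0;
}
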
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index 2d25ab28b4b3..5b896f5fafaf 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -7,8 +7,10 @@
#define _XE_GUC_LOG_H_
#include "xe_guc_log_types.h"
+#include "abi/guc_log_abi.h"
struct drm_printer;
+struct xe_device;
#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER)
#define CRASH_BUFFER_SIZE SZ_1M
@@ -17,7 +19,7 @@ struct drm_printer;
#else
#define CRASH_BUFFER_SIZE SZ_8K
#define DEBUG_BUFFER_SIZE SZ_64K
-#define CAPTURE_BUFFER_SIZE SZ_16K
+#define CAPTURE_BUFFER_SIZE SZ_1M
#endif
/*
* While we're using plain log level in i915, GuC controls are much more...
@@ -38,6 +40,10 @@ struct drm_printer;
int xe_guc_log_init(struct xe_guc_log *log);
void xe_guc_log_print(struct xe_guc_log *log, struct drm_printer *p);
+void xe_guc_log_print_dmesg(struct xe_guc_log *log);
+struct xe_guc_log_snapshot *xe_guc_log_snapshot_capture(struct xe_guc_log *log, bool atomic);
+void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_printer *p);
+void xe_guc_log_snapshot_free(struct xe_guc_log_snapshot *snapshot);
static inline u32
xe_guc_log_get_level(struct xe_guc_log *log)
@@ -45,4 +51,11 @@ xe_guc_log_get_level(struct xe_guc_log *log)
return log->level;
}
+u32 xe_guc_log_section_size_capture(struct xe_guc_log *log);
+u32 xe_guc_get_log_buffer_size(struct xe_guc_log *log, enum guc_log_buffer_type type);
+u32 xe_guc_get_log_buffer_offset(struct xe_guc_log *log, enum guc_log_buffer_type type);
+bool xe_guc_check_log_buf_overflow(struct xe_guc_log *log,
+ enum guc_log_buffer_type type,
+ unsigned int full_cnt);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h
index 125080d138a7..b3d5c72ac752 100644
--- a/drivers/gpu/drm/xe/xe_guc_log_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_log_types.h
@@ -7,10 +7,38 @@
#define _XE_GUC_LOG_TYPES_H_
#include <linux/types.h>
+#include "abi/guc_log_abi.h"
+
+#include "xe_uc_fw_types.h"
struct xe_bo;
/**
+ * struct xe_guc_log_snapshot:
+ * Capture of the GuC log plus various state useful for decoding the log
+ */
+struct xe_guc_log_snapshot {
+ /** @size: Size in bytes of the @copy allocation */
+ size_t size;
+ /** @copy: Host memory copy of the log buffer for later dumping, split into chunks */
+ void **copy;
+ /** @num_chunks: Number of chunks within @copy */
+ int num_chunks;
+ /** @ktime: Kernel time the snapshot was taken */
+ u64 ktime;
+ /** @stamp: GuC timestamp at which the snapshot was taken */
+ u64 stamp;
+ /** @level: GuC log verbosity level */
+ u32 level;
+ /** @ver_found: GuC firmware version */
+ struct xe_uc_fw_version ver_found;
+ /** @ver_want: GuC firmware version that driver expected */
+ struct xe_uc_fw_version ver_want;
+ /** @path: Path of GuC firmware blob */
+ const char *path;
+};
+
+/**
* struct xe_guc_log - GuC log
*/
struct xe_guc_log {
@@ -18,6 +46,12 @@ struct xe_guc_log {
u32 level;
/** @bo: XE BO for GuC log */
struct xe_bo *bo;
+ /** @stats: logging related stats */
+ struct {
+ u32 sampled_overflow;
+ u32 overflow;
+ u32 flush;
+ } stats[GUC_LOG_BUFFER_TYPE_MAX];
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 034b29984d5e..3beaaa7b25c1 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -6,8 +6,10 @@
#include "xe_guc_pc.h"
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>
#include "abi/guc_actions_slpc_abi.h"
@@ -19,6 +21,7 @@
#include "xe_gt.h"
#include "xe_gt_idle.h"
#include "xe_gt_printk.h"
+#include "xe_gt_throttle.h"
#include "xe_gt_types.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"
@@ -38,6 +41,7 @@
#define FREQ_INFO_REC XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x5ef0)
#define RPE_MASK REG_GENMASK(15, 8)
+#define RPA_MASK REG_GENMASK(31, 16)
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define CAGF_MASK REG_GENMASK(19, 11)
@@ -48,6 +52,9 @@
#define LNL_MERT_FREQ_CAP 800
#define BMG_MERT_FREQ_CAP 2133
+#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
+#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
+
/**
* DOC: GuC Power Conservation (PC)
*
@@ -112,9 +119,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
static int wait_for_pc_state(struct xe_guc_pc *pc,
- enum slpc_global_state state)
+ enum slpc_global_state state,
+ int timeout_ms)
{
- int timeout_us = 5000; /* rought 5ms, but no need for precision */
+ int timeout_us = 1000 * timeout_ms;
int slept, wait = 10;
xe_device_assert_mem_access(pc_to_xe(pc));
@@ -163,7 +171,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
/* Blocking here to ensure the results are ready before reading them */
@@ -186,7 +195,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
};
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -207,7 +217,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
struct xe_guc_ct *ct = &pc_to_guc(pc)->ct;
int ret;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING))
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS))
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
@@ -262,7 +273,7 @@ static void pc_set_manual_rp_ctrl(struct xe_guc_pc *pc, bool enable)
u32 state = enable ? RPSWCTL_ENABLE : RPSWCTL_DISABLE;
/* Allow/Disallow punit to process software freq requests */
- xe_mmio_write32(gt, RP_CONTROL, state);
+ xe_mmio_write32(&gt->mmio, RP_CONTROL, state);
}
static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
@@ -274,7 +285,7 @@ static void pc_set_cur_freq(struct xe_guc_pc *pc, u32 freq)
/* Req freq is in units of 16.66 Mhz */
rpnswreq = REG_FIELD_PREP(REQ_RATIO_MASK, encode_freq(freq));
- xe_mmio_write32(gt, RPNSWREQ, rpnswreq);
+ xe_mmio_write32(&gt->mmio, RPNSWREQ, rpnswreq);
/* Sleep for a small time to allow pcode to respond */
usleep_range(100, 300);
@@ -328,19 +339,52 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
freq);
}
+static void mtl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ if (xe_gt_is_media_type(gt))
+ reg = xe_mmio_read32(&gt->mmio, MTL_MPA_FREQUENCY);
+ else
+ reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
+
+ pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
+}
+
static void mtl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
if (xe_gt_is_media_type(gt))
- reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MPE_FREQUENCY);
else
- reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);
+ reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
+static void tgl_update_rpa_value(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 reg;
+
+ /*
+ * For PVC we still need to use fused RP0 as the approximation for RPa.
+ * For platforms other than PVC we get the resolved RPa directly from
+ * PCODE at a different register.
+ */
+ if (xe->info.platform == XE_PVC) {
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+ pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+ pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ }
+}
+
static void tgl_update_rpe_value(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
@@ -352,12 +396,13 @@ static void tgl_update_rpe_value(struct xe_guc_pc *pc)
* For other platforms than PVC we get the resolved RPe directly from
* PCODE at a different register
*/
- if (xe->info.platform == XE_PVC)
- reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
- else
- reg = xe_mmio_read32(gt, FREQ_INFO_REC);
-
- pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ if (xe->info.platform == XE_PVC) {
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+ pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ } else {
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+ pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+ }
}
static void pc_update_rp_values(struct xe_guc_pc *pc)
@@ -365,10 +410,13 @@ static void pc_update_rp_values(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
struct xe_device *xe = gt_to_xe(gt);
- if (GRAPHICS_VERx100(xe) >= 1270)
+ if (GRAPHICS_VERx100(xe) >= 1270) {
+ mtl_update_rpa_value(pc);
mtl_update_rpe_value(pc);
- else
+ } else {
+ tgl_update_rpa_value(pc);
tgl_update_rpe_value(pc);
+ }
/*
* RPe is decided at runtime by PCODE. In the rare case where that's
@@ -392,10 +440,10 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
/* When in RC6, actual frequency reported will be 0. */
if (GRAPHICS_VERx100(xe) >= 1270) {
- freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+ freq = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
} else {
- freq = xe_mmio_read32(gt, GT_PERF_STATUS);
+ freq = xe_mmio_read32(&gt->mmio, GT_PERF_STATUS);
freq = REG_FIELD_GET(CAGF_MASK, freq);
}
@@ -404,6 +452,30 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
return freq;
}
+static u32 get_cur_freq(struct xe_gt *gt)
+{
+ u32 freq;
+
+ freq = xe_mmio_read32(&gt->mmio, RPNSWREQ);
+ freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
+ return decode_freq(freq);
+}
+
+/**
+ * xe_guc_pc_get_cur_freq_fw - With fw held, get requested frequency
+ * @pc: The GuC PC
+ *
+ * Returns: the requested frequency for that GT instance
+ */
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
+
+ return get_cur_freq(gt);
+}
+
/**
* xe_guc_pc_get_cur_freq - Get Current requested frequency
* @pc: The GuC PC
@@ -415,22 +487,21 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
{
struct xe_gt *gt = pc_to_gt(pc);
- int ret;
+ unsigned int fw_ref;
/*
* GuC SLPC plays with cur freq request when GuCRC is enabled
* Block RC6 for a more reliable read.
*/
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- return ret;
-
- *freq = xe_mmio_read32(gt, RPNSWREQ);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return -ETIMEDOUT;
+ }
- *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq);
- *freq = decode_freq(*freq);
+ *freq = get_cur_freq(gt);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
}
@@ -446,6 +517,19 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
}
/**
+ * xe_guc_pc_get_rpa_freq - Get the RPa freq
+ * @pc: The GuC PC
+ *
+ * Returns: RPa freq.
+ */
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
+{
+ pc_update_rp_values(pc);
+
+ return pc->rpa_freq;
+}
+
+/**
* xe_guc_pc_get_rpe_freq - Get the RPe freq
* @pc: The GuC PC
*
@@ -479,9 +563,10 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc)
*/
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
{
- struct xe_gt *gt = pc_to_gt(pc);
int ret;
+ xe_device_assert_mem_access(pc_to_xe(pc));
+
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -489,22 +574,12 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
goto out;
}
- /*
- * GuC SLPC plays with min freq request when GuCRC is enabled
- * Block RC6 for a more reliable read.
- */
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- goto out;
-
ret = pc_action_query_task_state(pc);
if (ret)
- goto fw;
+ goto out;
*freq = pc_get_min_freq(pc);
-fw:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
mutex_unlock(&pc->freq_lock);
return ret;
@@ -612,10 +687,10 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
u32 reg, gt_c_state;
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
- reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MIRROR_TARGET_WP1);
gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
} else {
- reg = xe_mmio_read32(gt, GT_CORE_STATUS);
+ reg = xe_mmio_read32(&gt->mmio, GT_CORE_STATUS);
gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
}
@@ -638,7 +713,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
- reg = xe_mmio_read32(gt, GT_GFX_RC6);
+ reg = xe_mmio_read32(&gt->mmio, GT_GFX_RC6);
return reg;
}
@@ -652,7 +727,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u64 reg;
- reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_MC6);
return reg;
}
@@ -665,9 +740,9 @@ static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
xe_device_assert_mem_access(pc_to_xe(pc));
if (xe_gt_is_media_type(gt))
- reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, MTL_MEDIAP_STATE_CAP);
else
- reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, MTL_RP_STATE_CAP);
pc->rp0_freq = decode_freq(REG_FIELD_GET(MTL_RP0_CAP_MASK, reg));
@@ -683,9 +758,9 @@ static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
xe_device_assert_mem_access(pc_to_xe(pc));
if (xe->info.platform == XE_PVC)
- reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
else
- reg = xe_mmio_read32(gt, RP_STATE_CAP);
+ reg = xe_mmio_read32(&gt->mmio, RP_STATE_CAP);
pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
@@ -855,6 +930,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
struct xe_device *xe = pc_to_xe(pc);
struct xe_gt *gt = pc_to_gt(pc);
+ unsigned int fw_ref;
int ret = 0;
if (xe->info.skip_guc_pc)
@@ -864,13 +940,15 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (ret)
return ret;
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- return ret;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return -ETIMEDOUT;
+ }
xe_gt_idle_disable_c6(gt);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return 0;
}
@@ -947,6 +1025,17 @@ out:
return ret;
}
+static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
+{
+ int ret = 0;
+
+ ret = pc_action_set_param(pc,
+ SLPC_PARAM_STRATEGIES,
+ val);
+
+ return ret;
+}
+
/**
* xe_guc_pc_start - Start GuC's Power Conservation component
* @pc: Xe_GuC_PC instance
@@ -956,13 +1045,17 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
struct xe_device *xe = pc_to_xe(pc);
struct xe_gt *gt = pc_to_gt(pc);
u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+ unsigned int fw_ref;
+ ktime_t earlier;
int ret;
xe_gt_assert(gt, xe_device_uc_enabled(xe));
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (ret)
- return ret;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return -ETIMEDOUT;
+ }
if (xe->info.skip_guc_pc) {
if (xe->info.platform != XE_PVC)
@@ -975,17 +1068,29 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
goto out;
}
- memset(pc->bo->vmap.vaddr, 0, size);
+ xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
slpc_shared_data_write(pc, header.size, size);
+ earlier = ktime_get();
ret = pc_action_reset(pc);
if (ret)
goto out;
- if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) {
- xe_gt_err(gt, "GuC PC Start failed\n");
- ret = -EIO;
- goto out;
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_TIMEOUT_MS)) {
+ xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n",
+ xe_guc_pc_get_act_freq(pc), get_cur_freq(gt),
+ xe_gt_throttle_get_limit_reasons(gt));
+
+ if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING,
+ SLPC_RESET_EXTENDED_TIMEOUT_MS)) {
+ xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ xe_gt_warn(gt, "GuC PC excessive start time: %lldms",
+ ktime_ms_delta(ktime_get(), earlier));
}
ret = pc_init_freqs(pc);
@@ -1003,9 +1108,14 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
}
ret = pc_action_setup_gucrc(pc, GUCRC_FIRMWARE_CONTROL);
+ if (ret)
+ goto out;
+
+ /* Enable SLPC Optimized Strategy for compute */
+ ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
out:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return ret;
}
@@ -1037,18 +1147,19 @@ static void xe_guc_pc_fini_hw(void *arg)
{
struct xe_guc_pc *pc = arg;
struct xe_device *xe = pc_to_xe(pc);
+ unsigned int fw_ref;
if (xe_device_wedged(xe))
return;
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL));
+ fw_ref = xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
xe_guc_pc_gucrc_disable(pc);
XE_WARN_ON(xe_guc_pc_stop(pc));
/* Bind requested freq to mert_freq_cap before unload */
pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
- xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}
/**
@@ -1074,7 +1185,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1082,3 +1194,61 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}
+
+static const char *pc_get_state_string(struct xe_guc_pc *pc)
+{
+ switch (slpc_shared_data_read(pc, header.global_state)) {
+ case SLPC_GLOBAL_STATE_NOT_RUNNING:
+ return "not running";
+ case SLPC_GLOBAL_STATE_INITIALIZING:
+ return "initializing";
+ case SLPC_GLOBAL_STATE_RESETTING:
+ return "resetting";
+ case SLPC_GLOBAL_STATE_RUNNING:
+ return "running";
+ case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
+ return "shutting down";
+ case SLPC_GLOBAL_STATE_ERROR:
+ return "error";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * xe_guc_pc_print - Print GuC's Power Conservation information for debug
+ * @pc: Xe_GuC_PC instance
+ * @p: drm_printer
+ */
+void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p)
+{
+ drm_printf(p, "SLPC Shared Data Header:\n");
+ drm_printf(p, "\tSize: %x\n", slpc_shared_data_read(pc, header.size));
+ drm_printf(p, "\tGlobal State: %s\n", pc_get_state_string(pc));
+
+ if (pc_action_query_task_state(pc))
+ return;
+
+ drm_printf(p, "\nSLPC Tasks Status:\n");
+ drm_printf(p, "\tGTPERF enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_GTPERF_TASK_ENABLED));
+ drm_printf(p, "\tDCC enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_DCC_TASK_ENABLED));
+ drm_printf(p, "\tDCC in use: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_IN_DCC));
+ drm_printf(p, "\tBalancer enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_BALANCER_ENABLED));
+ drm_printf(p, "\tIBC enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_IBC_TASK_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT enabled: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_BALANCER_IA_LMT_ENABLED));
+ drm_printf(p, "\tBalancer IA LMT active: %s\n",
+ str_yes_no(slpc_shared_data_read(pc, task_state_data.status) &
+ SLPC_BALANCER_IA_LMT_ACTIVE));
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index efda432fadfc..0a2664d5c811 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -10,6 +10,7 @@
struct xe_guc_pc;
enum slpc_gucrc_mode;
+struct drm_printer;
int xe_guc_pc_init(struct xe_guc_pc *pc);
int xe_guc_pc_start(struct xe_guc_pc *pc);
@@ -17,10 +18,13 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc);
int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc);
int xe_guc_pc_override_gucrc_mode(struct xe_guc_pc *pc, enum slpc_gucrc_mode mode);
int xe_guc_pc_unset_gucrc_mode(struct xe_guc_pc *pc);
+void xe_guc_pc_print(struct xe_guc_pc *pc, struct drm_printer *p);
u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq);
+u32 xe_guc_pc_get_cur_freq_fw(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc);
+u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc);
u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc);
int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index 13810be015db..2978ac9a249b 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -17,6 +17,8 @@ struct xe_guc_pc {
struct xe_bo *bo;
/** @rp0_freq: HW RP0 frequency - The Maximum one */
u32 rp0_freq;
+ /** @rpa_freq: HW RPa frequency - The Achievable one */
+ u32 rpa_freq;
/** @rpe_freq: HW RPe frequency - The Efficient one */
u32 rpe_freq;
/** @rpn_freq: HW RPN frequency - The Minimum one */
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
index ade6162dc259..e5dc94f3e618 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay.c
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
+#include <linux/fault-inject.h>
#include <drm/drm_managed.h>
@@ -224,7 +225,7 @@ __relay_get_transaction(struct xe_guc_relay *relay, bool incoming, u32 remote, u
* with CTB lock held which is marked as used in the reclaim path.
* Btw, that's one of the reason why we use mempool here!
*/
- txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_KERNEL);
+ txn = mempool_alloc(&relay->pool, incoming ? GFP_ATOMIC : GFP_NOWAIT);
if (!txn)
return ERR_PTR(-ENOMEM);
@@ -355,6 +356,7 @@ int xe_guc_relay_init(struct xe_guc_relay *relay)
return drmm_add_action_or_reset(&xe->drm, __fini_relay, relay);
}
+ALLOW_ERROR_INJECTION(xe_guc_relay_init, ERRNO); /* See xe_pci_probe() */
static u32 to_relay_error(int err)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index d333be9c4227..9567f6700cf2 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -15,6 +15,7 @@
#include <drm/drm_managed.h>
#include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_slpc_abi.h"
#include "abi/guc_klvs_abi.h"
#include "regs/xe_lrc_layout.h"
#include "xe_assert.h"
@@ -27,6 +28,7 @@
#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
@@ -227,6 +229,17 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
static void guc_submit_fini(struct drm_device *drm, void *arg)
{
struct xe_guc *guc = arg;
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ ret = wait_event_timeout(guc->submission_state.fini_wq,
+ xa_empty(&guc->submission_state.exec_queue_lookup),
+ HZ * 5);
+
+ drain_workqueue(xe->destroy_wq);
+
+ xe_gt_assert(gt, ret);
xa_destroy(&guc->submission_state.exec_queue_lookup);
}
@@ -298,6 +311,8 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
primelockdep(guc);
+ guc->submission_state.initialized = true;
+
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
@@ -399,6 +414,7 @@ static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy,
MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
+MAKE_EXEC_QUEUE_POLICY_ADD(slpc_exec_queue_freq_req, SLPM_GT_FREQUENCY)
#undef MAKE_EXEC_QUEUE_POLICY_ADD
static const int xe_exec_queue_prio_to_guc[] = {
@@ -411,17 +427,22 @@ static const int xe_exec_queue_prio_to_guc[] = {
static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
{
struct exec_queue_policy policy;
- struct xe_device *xe = guc_to_xe(guc);
enum xe_exec_queue_priority prio = q->sched_props.priority;
u32 timeslice_us = q->sched_props.timeslice_us;
+ u32 slpc_exec_queue_freq_req = 0;
u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
+
+ if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY)
+ slpc_exec_queue_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
__guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
__guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
+ __guc_exec_queue_policy_add_slpc_exec_queue_freq_req(&policy,
+ slpc_exec_queue_freq_req);
xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
__guc_exec_queue_policy_action_size(&policy), 0, 0);
@@ -450,12 +471,11 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
struct guc_ctxt_registration_info *info)
{
#define MAX_MLRC_REG_SIZE (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
- struct xe_device *xe = guc_to_xe(guc);
u32 action[MAX_MLRC_REG_SIZE];
int len = 0;
int i;
- xe_assert(xe, xe_exec_queue_is_parallel(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q));
action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
action[len++] = info->flags;
@@ -478,7 +498,7 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
- xe_assert(xe, len <= MAX_MLRC_REG_SIZE);
+ xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
xe_guc_ct_send(&guc->ct, action, len, 0, 0);
@@ -512,7 +532,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
struct xe_lrc *lrc = q->lrc[0];
struct guc_ctxt_registration_info info;
- xe_assert(xe, !exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id;
@@ -602,7 +622,7 @@ static int wq_noop_append(struct xe_exec_queue *q)
if (wq_wait_for_space(q, wq_space_until_wrap(q)))
return -ENODEV;
- xe_assert(xe, FIELD_FIT(WQ_LEN_MASK, len_dw));
+ xe_gt_assert(guc_to_gt(guc), FIELD_FIT(WQ_LEN_MASK, len_dw));
parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
@@ -642,13 +662,13 @@ static void wq_item_append(struct xe_exec_queue *q)
wqi[i++] = lrc->ring.tail / sizeof(u64);
}
- xe_assert(xe, i == wqi_size / sizeof(u32));
+ xe_gt_assert(guc_to_gt(guc), i == wqi_size / sizeof(u32));
iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
wq[q->guc->wqi_tail / sizeof(u32)]));
xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
q->guc->wqi_tail += wqi_size;
- xe_assert(xe, q->guc->wqi_tail <= WQ_SIZE);
+ xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE);
xe_device_wmb(xe);
@@ -660,7 +680,6 @@ static void wq_item_append(struct xe_exec_queue *q)
static void submit_exec_queue(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_lrc *lrc = q->lrc[0];
u32 action[3];
u32 g2h_len = 0;
@@ -668,7 +687,7 @@ static void submit_exec_queue(struct xe_exec_queue *q)
int len = 0;
bool extra_submit = false;
- xe_assert(xe, exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
if (xe_exec_queue_is_parallel(q))
wq_item_append(q);
@@ -715,11 +734,11 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
+ struct dma_fence *fence = NULL;
bool lr = xe_exec_queue_is_lr(q);
- xe_assert(xe, !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
- exec_queue_banned(q) || exec_queue_suspended(q));
+ xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
+ exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job);
@@ -733,25 +752,23 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
if (lr) {
xe_sched_job_set_error(job, -EOPNOTSUPP);
- return NULL;
- } else if (test_and_set_bit(JOB_FLAG_SUBMIT, &job->fence->flags)) {
- return job->fence;
+ dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */
} else {
- return dma_fence_get(job->fence);
+ fence = job->fence;
}
+
+ return fence;
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
{
struct xe_sched_job *job = to_xe_sched_job(drm_job);
- xe_exec_queue_update_run_ticks(job->q);
-
trace_xe_sched_job_free(job);
xe_sched_job_put(job);
}
-static int guc_read_stopped(struct xe_guc *guc)
+int xe_guc_read_stopped(struct xe_guc *guc)
{
return atomic_read(&guc->submission_state.stopped);
}
@@ -767,17 +784,19 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
struct xe_exec_queue *q)
{
MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
- struct xe_device *xe = guc_to_xe(guc);
int ret;
set_min_preemption_timeout(guc, q);
smp_rmb();
- ret = wait_event_timeout(guc->ct.wq, !exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
+ ret = wait_event_timeout(guc->ct.wq,
+ (!exec_queue_pending_enable(q) &&
+ !exec_queue_pending_disable(q)) ||
+ xe_guc_read_stopped(guc),
+ HZ * 5);
if (!ret) {
struct xe_gpu_scheduler *sched = &q->guc->sched;
- drm_warn(&xe->drm, "Pending enable failed to respond");
+ xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
xe_sched_tdr_queue_imm(sched);
@@ -821,17 +840,25 @@ static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
*/
void xe_guc_submit_wedge(struct xe_guc *guc)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
unsigned long index;
int err;
xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
+ /*
+ * If device is being wedged even before submission_state is
+ * initialized, there's nothing to do here.
+ */
+ if (!guc->submission_state.initialized)
+ return;
+
err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
guc_submit_wedged_fini, guc);
if (err) {
- drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
+ xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; "
+ "Although device is wedged.\n");
return;
}
@@ -863,11 +890,10 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
container_of(w, struct xe_guc_exec_queue, lr_tdr);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_gpu_scheduler *sched = &ge->sched;
bool wedged;
- xe_assert(xe, xe_exec_queue_is_lr(q));
+ xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
trace_xe_exec_queue_lr_cleanup(q);
wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
@@ -899,15 +925,21 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
*/
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc), HZ * 5);
if (!ret) {
- drm_warn(&xe->drm, "Schedule disable failed to respond");
+ xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
+ xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
+ q->guc->id);
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
return;
}
}
+ if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
+ xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
+
xe_sched_submission_start(sched);
}
@@ -916,12 +948,22 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
- u32 ctx_timestamp = xe_lrc_ctx_timestamp(q->lrc[0]);
- u32 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+ u32 ctx_timestamp, ctx_job_timestamp;
u32 timeout_ms = q->sched_props.job_timeout_ms;
u32 diff;
u64 running_time_ms;
+ if (!xe_sched_job_started(job)) {
+ xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id);
+
+ return xe_sched_invalidate_job(job, 2);
+ }
+
+ ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
+ ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
+
/*
* Counter wraps at ~223s at the usual 19.2MHz; be paranoid and catch
* possible overflows with a high timeout.
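
The comment above is about unsigned wrap: with a u32 context timestamp ticking at the assumed 19.2 MHz, the counter rolls over roughly every 223 s, but the plain u32 subtraction of the two samples stays correct across a single wrap. A self-contained userspace model of that arithmetic (the driver converts ticks through its GT clock helpers; the constant and helper names below are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define CTX_TIMESTAMP_HZ 19200000u	/* assumed 19.2 MHz reference */

/* u32 subtraction wraps modulo 2^32, so one counter rollover between
 * the two samples still yields the correct elapsed tick count. */
static uint64_t ticks_to_ms(uint32_t now, uint32_t then)
{
	uint32_t diff = now - then;

	return ((uint64_t)diff * 1000) / CTX_TIMESTAMP_HZ;
}

int main(void)
{
	/* The counter wrapped between the two samples. */
	uint32_t job_ts = 0xffff0000u, now = 0x02000000u;

	printf("running for ~%llu ms\n",
	       (unsigned long long)ticks_to_ms(now, job_ts));	/* ~1751 ms */
	return 0;
}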
@@ -967,8 +1009,8 @@ static void enable_scheduling(struct xe_exec_queue *q)
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc)) {
xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
@@ -1023,6 +1065,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_guc *guc = exec_queue_to_guc(q);
const char *process_name = "no process";
+ struct xe_device *xe = guc_to_xe(guc);
+ unsigned int fw_ref;
int err = -ETIME;
pid_t pid = -1;
int i = 0;
@@ -1049,9 +1093,21 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
exec_queue_killed_or_banned_or_wedged(q) ||
exec_queue_destroyed(q);
- /* Job hasn't started, can't be timed out */
- if (!skip_timeout_check && !xe_sched_job_started(job))
- goto rearm;
+ /*
+	 * If a devcoredump has not been captured and the GuC capture for the
+	 * job is not ready, do a manual capture first and decide later
+	 * whether we need to use it.
+ */
+ if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
+ !xe_guc_capture_get_matching_and_lock(q)) {
+		/* take forcewake before manually capturing the engine registers */
+ fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
+ xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
+
+ xe_engine_snapshot_capture_for_queue(q);
+
+ xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
+ }
/*
* XXX: Sampling timeout doesn't work in wedged mode as we have to
@@ -1075,9 +1131,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
* modifying state
*/
ret = wait_event_timeout(guc->ct.wq,
- !exec_queue_pending_enable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc))
+ (!exec_queue_pending_enable(q) &&
+ !exec_queue_pending_disable(q)) ||
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc))
goto trigger_reset;
/*
@@ -1101,11 +1158,16 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
smp_rmb();
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- guc_read_stopped(guc), HZ * 5);
- if (!ret || guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc), HZ * 5);
+ if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
- xe_gt_warn(guc_to_gt(guc), "Schedule disable failed to respond");
+ xe_gt_warn(guc_to_gt(guc),
+ "Schedule disable failed to respond, guc_id=%d",
+ q->guc->id);
+ xe_devcoredump(q, job,
+ "Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
+ q->guc->id, ret, xe_guc_read_stopped(guc));
set_exec_queue_extra_ref(q);
xe_exec_queue_get(q); /* GT reset owns this */
set_exec_queue_banned(q);
@@ -1128,14 +1190,20 @@ trigger_reset:
process_name = q->vm->xef->process_name;
pid = q->vm->xef->pid;
}
- xe_gt_notice(guc_to_gt(guc), "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
- xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
- q->guc->id, q->flags, process_name, pid);
+
+ if (!exec_queue_killed(q))
+ xe_gt_notice(guc_to_gt(guc),
+ "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags, process_name, pid);
trace_xe_sched_job_timedout(job);
if (!exec_queue_killed(q))
- xe_devcoredump(job);
+ xe_devcoredump(q, job,
+ "Timedout job - seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
+ xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
+ q->guc->id, q->flags);
/*
* Kernel jobs should never fail, nor should VM jobs if they do
@@ -1189,7 +1257,7 @@ sched_enable:
enable_scheduling(q);
rearm:
/*
- * XXX: Ideally want to adjust timeout based on current exection time
+ * XXX: Ideally want to adjust timeout based on current execution time
* but there is not currently an easy way to do in DRM scheduler. With
* some thought, do this in a follow up.
*/
@@ -1209,9 +1277,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
+ release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
- release_guc_id(guc, q);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&ge->sched.base.work_tdr);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
@@ -1250,9 +1320,8 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
{
struct xe_exec_queue *q = msg->private_data;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
+ xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
trace_xe_exec_queue_cleanup_entity(q);
if (exec_queue_registered(q))
@@ -1288,11 +1357,10 @@ static void __suspend_fence_signal(struct xe_exec_queue *q)
static void suspend_fence_signal(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, exec_queue_suspended(q) || exec_queue_killed(q) ||
- guc_read_stopped(guc));
- xe_assert(xe, q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) ||
+ xe_guc_read_stopped(guc));
+ xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending);
__suspend_fence_signal(q);
}
@@ -1304,10 +1372,10 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
exec_queue_enabled(q)) {
- wait_event(guc->ct.wq, q->guc->resume_time != RESUME_PENDING ||
- guc_read_stopped(guc));
+ wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING ||
+ xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q));
- if (!guc_read_stopped(guc)) {
+ if (!xe_guc_read_stopped(guc)) {
s64 since_resume_ms =
ktime_ms_delta(ktime_get(),
q->guc->resume_time);
@@ -1388,12 +1456,11 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_guc_exec_queue *ge;
long timeout;
int err, i;
- xe_assert(xe, xe_device_uc_enabled(guc_to_xe(guc)));
+ xe_gt_assert(guc_to_gt(guc), xe_device_uc_enabled(guc_to_xe(guc)));
ge = kzalloc(sizeof(*ge), GFP_KERNEL);
if (!ge)
@@ -1431,7 +1498,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->entity = &ge->entity;
- if (guc_read_stopped(guc))
+ if (xe_guc_read_stopped(guc))
xe_sched_stop(sched);
mutex_unlock(&guc->submission_state.lock);
@@ -1587,7 +1654,7 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
!READ_ONCE(q->guc->suspend_pending) ||
exec_queue_killed(q) ||
- guc_read_stopped(guc),
+ xe_guc_read_stopped(guc),
HZ * 5);
if (!ret) {
@@ -1606,9 +1673,8 @@ static void guc_exec_queue_resume(struct xe_exec_queue *q)
struct xe_gpu_scheduler *sched = &q->guc->sched;
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, !q->guc->suspend_pending);
+ xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
xe_sched_msg_lock(sched);
guc_exec_queue_try_add_msg(q, msg, RESUME);
@@ -1681,7 +1747,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
ban = true;
}
} else if (xe_exec_queue_is_lr(q) &&
- (xe_lrc_ring_head(q->lrc[0]) != xe_lrc_ring_tail(q->lrc[0]))) {
+ !xe_lrc_ring_is_idle(q->lrc[0])) {
ban = true;
}
@@ -1696,6 +1762,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (!guc->submission_state.initialized)
+ return 0;
+
/*
* Using an atomic here rather than submission_state.lock as this
* function can be called while holding the CT lock (engine reset
@@ -1713,16 +1782,15 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
void xe_guc_submit_reset_wait(struct xe_guc *guc)
{
wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
- !guc_read_stopped(guc));
+ !xe_guc_read_stopped(guc));
}
void xe_guc_submit_stop(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
@@ -1764,9 +1832,8 @@ int xe_guc_submit_start(struct xe_guc *guc)
{
struct xe_exec_queue *q;
unsigned long index;
- struct xe_device *xe = guc_to_xe(guc);
- xe_assert(xe, guc_read_stopped(guc) == 1);
+ xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
mutex_lock(&guc->submission_state.lock);
atomic_dec(&guc->submission_state.stopped);
@@ -1787,22 +1854,22 @@ int xe_guc_submit_start(struct xe_guc *guc)
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
struct xe_exec_queue *q;
if (unlikely(guc_id >= GUC_ID_MAX)) {
- drm_err(&xe->drm, "Invalid guc_id %u", guc_id);
+ xe_gt_err(gt, "Invalid guc_id %u\n", guc_id);
return NULL;
}
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
if (unlikely(!q)) {
- drm_err(&xe->drm, "Not engine present for guc_id %u", guc_id);
+ xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id);
return NULL;
}
- xe_assert(xe, guc_id >= q->guc->id);
- xe_assert(xe, guc_id < (q->guc->id + q->width));
+ xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id);
+ xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width));
return q;
}
@@ -1842,31 +1909,43 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
- clear_exec_queue_pending_disable(q);
if (q->guc->suspend_pending) {
suspend_fence_signal(q);
+ clear_exec_queue_pending_disable(q);
} else {
if (exec_queue_banned(q) || check_timeout) {
smp_wmb();
wake_up_all(&guc->ct.wq);
}
- if (!check_timeout)
+ if (!check_timeout && exec_queue_destroyed(q)) {
+ /*
+ * Make sure to clear the pending_disable only
+ * after sampling the destroyed state. We want
+ * to ensure we don't trigger the unregister too
+ * early with something intending to only
+ * disable scheduling. The caller doing the
+ * destroy must wait for an ongoing
+ * pending_disable before marking as destroyed.
+ */
+ clear_exec_queue_pending_disable(q);
deregister_exec_queue(guc, q);
+ } else {
+ clear_exec_queue_pending_disable(q);
+ }
}
}
}
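
The ordering comment above is the interesting part of this hunk: the done-handler samples the destroyed state before releasing pending_disable, while the destroy path waits for pending_disable to clear before setting destroyed, so a completion that only meant to disable scheduling can never trigger the deregister. A stand-alone model of that handshake (names and the busy-wait are purely illustrative; the driver sleeps on a wait queue and uses its own flag helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_queue {
	atomic_bool pending_disable;
	atomic_bool destroyed;
};

/* Destroy path: wait out any in-flight disable, then mark destroyed. */
static void model_mark_destroyed(struct model_queue *q)
{
	while (atomic_load(&q->pending_disable))
		;	/* the driver sleeps here; spinning keeps the model short */
	atomic_store(&q->destroyed, true);
}

/* Scheduling-done path: decide on deregister before releasing the flag. */
static bool model_sched_done(struct model_queue *q)
{
	bool deregister = atomic_load(&q->destroyed);

	atomic_store(&q->pending_disable, false);
	return deregister;
}

int main(void)
{
	struct model_queue q = { true, false };

	/* A plain scheduling-disable completes: no deregister is seen ... */
	printf("deregister: %d\n", model_sched_done(&q));
	/* ... and only now may a racing destroy proceed. */
	model_mark_destroyed(&q);
	return 0;
}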
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
- u32 runnable_state = msg[1];
+ u32 guc_id, runnable_state;
- if (unlikely(len < 2)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 2))
return -EPROTO;
- }
+
+ guc_id = msg[0];
+ runnable_state = msg[1];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1900,14 +1979,13 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1929,14 +2007,13 @@ int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
- }
+
+ guc_id = msg[0];
q = g2h_exec_queue_lookup(guc, guc_id);
if (unlikely(!q))
@@ -1945,8 +2022,6 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
- /* FIXME: Do error capture, most likely async */
-
trace_xe_exec_queue_reset(q);
/*
@@ -1962,17 +2037,53 @@ int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
return 0;
}
+/**
+ * xe_guc_error_capture_handler - Handler for GuC error-capture notifications
+ * @guc: The GuC object
+ * @msg: Pointer to the message
+ * @len: The message length
+ *
+ * When the GuC capture data is ready, the GuC sends an
+ * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION message to the host. This function
+ * is called first to check the status before processing the data that comes
+ * with the message.
+ *
+ * Returns: 0 on success, negative error code otherwise.
+ */
+int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ u32 status;
+
+ if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN))
+ return -EPROTO;
+
+ status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
+ if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
+ xe_gt_warn(guc_to_gt(guc), "G2H-Error capture no space");
+
+ xe_guc_capture_process(guc);
+
+ return 0;
+}
+
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = guc_to_xe(guc);
struct xe_exec_queue *q;
- u32 guc_id = msg[0];
+ u32 guc_id;
- if (unlikely(len < 1)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len < 1))
return -EPROTO;
+
+ guc_id = msg[0];
+
+ if (guc_id == GUC_ID_UNKNOWN) {
+ /*
+		 * GuC uses GUC_ID_UNKNOWN if it cannot map the CAT fault to any PF/VF
+		 * context. In such a case, only the PF is notified about the fault.
+ */
+ xe_gt_err_ratelimited(gt, "Memory CAT error reported by GuC!\n");
+ return 0;
}
q = g2h_exec_queue_lookup(guc, guc_id);
@@ -1994,24 +2105,22 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
- struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
- if (unlikely(len != 3)) {
- drm_err(&xe->drm, "Invalid length %u", len);
+ if (unlikely(len != 3))
return -EPROTO;
- }
guc_class = msg[0];
instance = msg[1];
reason = msg[2];
/* Unexpected failure of a hardware feature, log an actual error */
- drm_err(&xe->drm, "GuC engine reset request failed on %d:%d because 0x%08X",
- guc_class, instance, reason);
+ xe_gt_err(gt, "GuC engine reset request failed on %d:%d because 0x%08X",
+ guc_class, instance, reason);
- xe_gt_reset_async(guc_to_gt(guc));
+ xe_gt_reset_async(gt);
return 0;
}
@@ -2176,7 +2285,7 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
if (!snapshot)
return;
- drm_printf(p, "\nGuC ID: %d\n", snapshot->guc.id);
+ drm_printf(p, "GuC ID: %d\n", snapshot->guc.id);
drm_printf(p, "\tName: %s\n", snapshot->name);
drm_printf(p, "\tClass: %d\n", snapshot->class);
drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index bdf8c9f3d24a..9b71a986c6ca 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -20,12 +20,14 @@ void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
+int xe_guc_read_stopped(struct xe_guc *guc);
int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len);
int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
u32 len);
int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len);
+int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len);
struct xe_guc_submit_exec_queue_snapshot *
xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index ed150fc09ad0..1fde7614fcc5 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -11,7 +11,9 @@
#include "regs/xe_reg_defs.h"
#include "xe_guc_ads_types.h"
+#include "xe_guc_buf_types.h"
#include "xe_guc_ct_types.h"
+#include "xe_guc_engine_activity_types.h"
#include "xe_guc_fwif.h"
#include "xe_guc_log_types.h"
#include "xe_guc_pc_types.h"
@@ -58,10 +60,23 @@ struct xe_guc {
struct xe_guc_ads ads;
/** @ct: GuC ct */
struct xe_guc_ct ct;
+ /** @buf: GuC Buffer Cache manager */
+ struct xe_guc_buf_cache buf;
+ /** @capture: the error-state-capture module's data and objects */
+ struct xe_guc_state_capture *capture;
/** @pc: GuC Power Conservation */
struct xe_guc_pc pc;
/** @dbm: GuC Doorbell Manager */
struct xe_guc_db_mgr dbm;
+
+ /** @g2g: GuC to GuC communication state */
+ struct {
+ /** @g2g.bo: Storage for GuC to GuC communication channels */
+ struct xe_bo *bo;
+ /** @g2g.owned: Is the BO owned by this GT or just mapped in */
+ bool owned;
+ } g2g;
+
/** @submission_state: GuC submission state */
struct {
/** @submission_state.idm: GuC context ID Manager */
@@ -74,9 +89,15 @@ struct xe_guc {
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
bool enabled;
+ /**
+		 * @submission_state.initialized: marks whether the submission state
+		 * has been initialized; before that, not even the lock is valid
+ */
+ bool initialized;
/** @submission_state.fini_wq: submit fini wait queue */
wait_queue_head_t fini_wq;
} submission_state;
+
/** @hwconfig: Hardware config state */
struct {
/** @hwconfig.bo: buffer object of the hardware config */
@@ -88,6 +109,9 @@ struct xe_guc {
/** @relay: GuC Relay Communication used in SR-IOV */
struct xe_guc_relay relay;
+ /** @engine_activity: Device specific engine activity */
+ struct xe_guc_engine_activity engine_activity;
+
/**
* @notify_reg: Register which is written to notify GuC of H2G messages
*/
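
The new submission_state.initialized flag documented above backs the early-exit guards added to xe_guc_submit_wedge() and xe_guc_submit_reset_prepare(): wedge and reset paths can run before xe_guc_submit_init(), at which point not even the lock is valid. A small sketch of the guard pattern, with hypothetical names:

#include <stdbool.h>

struct submission_state_model {
	bool initialized;
	/* lock, exec_queue_lookup, fini_wq ... only valid once initialized */
};

/* Hypothetical early path (wedge, reset prepare): bail out before
 * touching any state that init has not set up yet. */
static int early_teardown(struct submission_state_model *s)
{
	if (!s->initialized)
		return 0;	/* nothing to stop or clean up */

	/* safe to take the lock and walk the exec queue lookup here */
	return 1;
}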
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 65b2e147c4b9..27d11e06a82b 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -12,6 +12,7 @@
#include "xe_drv.h"
#include "xe_heci_gsc.h"
#include "xe_platform_types.h"
+#include "xe_survivability_mode.h"
#define GSC_BAR_LENGTH 0x00000FFC
@@ -88,12 +89,9 @@ static void heci_gsc_release_dev(struct device *dev)
kfree(adev);
}
-void xe_heci_gsc_fini(struct xe_device *xe)
+static void xe_heci_gsc_fini(void *arg)
{
- struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
-
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
- return;
+ struct xe_heci_gsc *heci_gsc = arg;
if (heci_gsc->adev) {
struct auxiliary_device *aux_dev = &heci_gsc->adev->aux_dev;
@@ -105,6 +103,7 @@ void xe_heci_gsc_fini(struct xe_device *xe)
if (heci_gsc->irq >= 0)
irq_free_desc(heci_gsc->irq);
+
heci_gsc->irq = -1;
}
@@ -171,14 +170,14 @@ static int heci_gsc_add_device(struct xe_device *xe, const struct heci_gsc_def *
return ret;
}
-void xe_heci_gsc_init(struct xe_device *xe)
+int xe_heci_gsc_init(struct xe_device *xe)
{
struct xe_heci_gsc *heci_gsc = &xe->heci_gsc;
- const struct heci_gsc_def *def;
+ const struct heci_gsc_def *def = NULL;
int ret;
- if (!HAS_HECI_GSCFI(xe) && !HAS_HECI_CSCFI(xe))
- return;
+ if (!xe->info.has_heci_gscfi && !xe->info.has_heci_cscfi)
+ return 0;
heci_gsc->irq = -1;
@@ -190,29 +189,24 @@ void xe_heci_gsc_init(struct xe_device *xe)
def = &heci_gsc_def_dg2;
} else if (xe->info.platform == XE_DG1) {
def = &heci_gsc_def_dg1;
- } else {
- drm_warn_once(&xe->drm, "Unknown platform\n");
- return;
}
- if (!def->name) {
- drm_warn_once(&xe->drm, "HECI is not implemented!\n");
- return;
+ if (!def || !def->name) {
+ drm_warn(&xe->drm, "HECI is not implemented!\n");
+ return 0;
}
- if (!def->use_polling) {
+ ret = devm_add_action_or_reset(xe->drm.dev, xe_heci_gsc_fini, heci_gsc);
+ if (ret)
+ return ret;
+
+ if (!def->use_polling && !xe_survivability_mode_is_enabled(xe)) {
ret = heci_gsc_irq_setup(xe);
if (ret)
- goto fail;
+ return ret;
}
- ret = heci_gsc_add_device(xe, def);
- if (ret)
- goto fail;
-
- return;
-fail:
- xe_heci_gsc_fini(xe);
+ return heci_gsc_add_device(xe, def);
}
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
@@ -222,7 +216,7 @@ void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & GSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_GSCFI(xe)) {
+ if (!xe->info.has_heci_gscfi) {
drm_warn_once(&xe->drm, "GSC irq: not supported");
return;
}
@@ -242,7 +236,7 @@ void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir)
if ((iir & CSC_IRQ_INTF(1)) == 0)
return;
- if (!HAS_HECI_CSCFI(xe)) {
+ if (!xe->info.has_heci_cscfi) {
drm_warn_once(&xe->drm, "CSC irq: not supported");
return;
}
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.h b/drivers/gpu/drm/xe/xe_heci_gsc.h
index 48b3b1838045..745eb6783942 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.h
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.h
@@ -33,8 +33,7 @@ struct xe_heci_gsc {
int irq;
};
-void xe_heci_gsc_init(struct xe_device *xe);
-void xe_heci_gsc_fini(struct xe_device *xe);
+int xe_heci_gsc_init(struct xe_device *xe);
void xe_heci_gsc_irq_handler(struct xe_device *xe, u32 iir);
void xe_heci_csc_irq_handler(struct xe_device *xe, u32 iir);
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 2c32dc46f7d4..57b71956ddf4 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,39 +19,51 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
-/*
- * xe_mark_range_accessed() - mark a range is accessed, so core mm
- * have such information for memory eviction or write back to
- * hard disk
- *
- * @range: the range to mark
- * @write: if write to this range, we mark pages in this range
- * as dirty
- */
-static void xe_mark_range_accessed(struct hmm_range *range, bool write)
+static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+ struct hmm_range *range, struct rw_semaphore *notifier_sem)
{
- struct page *page;
- u64 i, npages;
+ unsigned long i, npages, hmm_pfn;
+ unsigned long num_chunks = 0;
+ int ret;
+
+	/* The HMM docs say this is needed. */
+ ret = down_read_interruptible(notifier_sem);
+ if (ret)
+ return ret;
+
+ if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+ up_read(notifier_sem);
+ return -EAGAIN;
+ }
npages = xe_npages_in_range(range->start, range->end);
- for (i = 0; i < npages; i++) {
- page = hmm_pfn_to_page(range->hmm_pfns[i]);
- if (write)
- set_page_dirty_lock(page);
+ for (i = 0; i < npages;) {
+ unsigned long len;
+
+ hmm_pfn = range->hmm_pfns[i];
+ xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
- mark_page_accessed(page);
+ len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+
+ /* If order > 0 the page may extend beyond range->start */
+ len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
+ i += len;
+ num_chunks++;
}
+ up_read(notifier_sem);
+
+ return sg_alloc_table(st, num_chunks, GFP_KERNEL);
}
-/*
+/**
* xe_build_sg() - build a scatter gather table for all the physical pages/pfn
* in a hmm_range. dma-map pages if necessary. The dma-address is saved in the
* sg table and will be used to program the GPU page table later.
- *
* @xe: the xe device who will access the dma-address in sg table
* @range: the hmm range that we build the sg table from. range->hmm_pfns[]
* has the pfn numbers of pages that back up this hmm address range.
* @st: pointer to the sg table.
+ * @notifier_sem: The xe notifier lock.
* @write: whether we write to this range. This decides dma map direction
* for system pages. If write, we map it bidirectional; otherwise
* DMA_TO_DEVICE
@@ -78,43 +90,88 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
* Returns 0 if successful; -ENOMEM if fails to allocate memory
*/
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st, bool write)
+ struct sg_table *st,
+ struct rw_semaphore *notifier_sem,
+ bool write)
{
+ unsigned long npages = xe_npages_in_range(range->start, range->end);
struct device *dev = xe->drm.dev;
- struct page **pages;
- u64 i, npages;
- int ret;
+ struct scatterlist *sgl;
+ struct page *page;
+ unsigned long i, j;
- npages = xe_npages_in_range(range->start, range->end);
- pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ lockdep_assert_held(notifier_sem);
+
+ i = 0;
+ for_each_sg(st->sgl, sgl, st->nents, j) {
+ unsigned long hmm_pfn, size;
+
+ hmm_pfn = range->hmm_pfns[i];
+ page = hmm_pfn_to_page(hmm_pfn);
+ xe_assert(xe, !is_device_private_page(page));
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= page_to_pfn(page) & (size - 1);
+ i += size;
- for (i = 0; i < npages; i++) {
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
- xe_assert(xe, !is_device_private_page(pages[i]));
+ if (unlikely(j == st->nents - 1)) {
+ xe_assert(xe, i >= npages);
+ if (i > npages)
+ size -= (i - npages);
+
+ sg_mark_end(sgl);
+ } else {
+ xe_assert(xe, i < npages);
+ }
+
+ sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
}
- ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
- xe_sg_segment_size(dev), GFP_KERNEL);
- if (ret)
- goto free_pages;
+ return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
+static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ lockdep_assert_held_write(&vm->lock);
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+
+ mutex_lock(&userptr->unmap_mutex);
+ xe_assert(vm->xe, !userptr->mapped);
+ userptr->mapped = true;
+ mutex_unlock(&userptr->unmap_mutex);
+}
- ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
- sg_free_table(st);
- st = NULL;
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ bool write = !xe_vma_read_only(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+
+ if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
+ !lockdep_is_held_type(&vm->lock, 0) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ /* Don't unmap in exec critical section. */
+ xe_vm_assert_held(vm);
+ /* Don't unmap while mapping the sg. */
+ lockdep_assert_held(&vm->lock);
}
-free_pages:
- kvfree(pages);
- return ret;
+ mutex_lock(&userptr->unmap_mutex);
+ if (userptr->sg && userptr->mapped)
+ dma_unmap_sgtable(xe->drm.dev, userptr->sg,
+ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ userptr->mapped = false;
+ mutex_unlock(&userptr->unmap_mutex);
}
-/*
+/**
* xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- *
* @uvma: the userptr vma which hold the scatter gather table
*
* With function xe_userptr_populate_range, we allocate storage of
@@ -124,16 +181,9 @@ free_pages:
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
{
struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
- struct device *dev = xe->drm.dev;
-
- xe_assert(xe, userptr->sg);
- dma_unmap_sgtable(dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
+ xe_hmm_userptr_unmap(uvma);
sg_free_table(userptr->sg);
userptr->sg = NULL;
}
@@ -159,20 +209,27 @@ void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
* This function allocates the storage of the userptr sg table.
* It is caller's responsibility to free it calling sg_free_table.
*
- * returns: 0 for succuss; negative error no on failure
+ * returns: 0 for success; negative error code on failure
*/
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
bool is_mm_mmap_locked)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+ unsigned long *pfns;
struct xe_userptr *userptr;
struct xe_vma *vma = &uvma->vma;
u64 userptr_start = xe_vma_userptr(vma);
u64 userptr_end = userptr_start + xe_vma_size(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range;
+ struct hmm_range hmm_range = {
+ .pfn_flags_mask = 0, /* ignore pfns */
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .start = userptr_start,
+ .end = userptr_end,
+ .notifier = &uvma->userptr.notifier,
+ .dev_private_owner = vm->xe,
+ };
bool write = !xe_vma_read_only(vma);
unsigned long notifier_seq;
u64 npages;
@@ -199,19 +256,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
return -ENOMEM;
if (write)
- flags |= HMM_PFN_REQ_WRITE;
+ hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
if (!mmget_not_zero(userptr->notifier.mm)) {
ret = -EFAULT;
goto free_pfns;
}
- hmm_range.default_flags = flags;
hmm_range.hmm_pfns = pfns;
- hmm_range.notifier = &userptr->notifier;
- hmm_range.start = userptr_start;
- hmm_range.end = userptr_end;
- hmm_range.dev_private_owner = vm->xe;
while (true) {
hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
@@ -238,16 +290,36 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto free_pfns;
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+ ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
if (ret)
goto free_pfns;
- xe_mark_range_accessed(&hmm_range, write);
+ ret = down_read_interruptible(&vm->userptr.notifier_lock);
+ if (ret)
+ goto free_st;
+
+ if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
+ &vm->userptr.notifier_lock, write);
+ if (ret)
+ goto out_unlock;
+
userptr->sg = &userptr->sgt;
+ xe_hmm_userptr_set_mapped(uvma);
userptr->notifier_seq = hmm_range.notifier_seq;
+ up_read(&vm->userptr.notifier_lock);
+ kvfree(pfns);
+ return 0;
+out_unlock:
+ up_read(&vm->userptr.notifier_lock);
+free_st:
+ sg_free_table(&userptr->sgt);
free_pfns:
kvfree(pfns);
return ret;
}
-
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
index 909dc2bdcd97..0ea98d8e7bbc 100644
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -3,9 +3,16 @@
* Copyright © 2024 Intel Corporation
*/
+#ifndef _XE_HMM_H_
+#define _XE_HMM_H_
+
#include <linux/types.h>
struct xe_userptr_vma;
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index f5459f97af23..6a846e4cb221 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -229,7 +229,7 @@ bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
{
struct xe_gt *gt = huc_to_gt(huc);
- return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
+ return xe_mmio_read32(&gt->mmio, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
}
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
@@ -268,7 +268,7 @@ int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
goto fail;
}
- ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
+ ret = xe_mmio_wait32(&gt->mmio, huc_auth_modes[type].reg, huc_auth_modes[type].val,
huc_auth_modes[type].val, 100000, NULL, false);
if (ret) {
xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret));
@@ -296,19 +296,19 @@ void xe_huc_sanitize(struct xe_huc *huc)
void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
struct xe_gt *gt = huc_to_gt(huc);
- int err;
+ unsigned int fw_ref;
xe_uc_fw_print(&huc->fw, p);
if (!xe_uc_fw_is_enabled(&huc->fw))
return;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
return;
drm_printf(p, "\nHuC status: 0x%08x\n",
- xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));
+ xe_mmio_read32(&gt->mmio, HUC_KERNEL_LOAD_INFO));
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
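
The xe_huc.c hunks above show the reworked forcewake contract used throughout this series: xe_force_wake_get() now returns a reference mask instead of an error code, 0 means no domain was woken, and the exact mask must be handed back to xe_force_wake_put(). A hedged usage sketch (the helper name and body are hypothetical; the calls are the ones visible in the diff):

#include "xe_force_wake.h"
#include "xe_gt.h"

/* Hypothetical caller of the reference-mask based forcewake API. */
static void with_gt_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;		/* nothing acquired, nothing to put */

	/* ... MMIO access that needs the GT forcewake domain ... */

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}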
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index c9c3beb3ce8d..93241fd0a4ba 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -8,10 +8,13 @@
#include <linux/nospec.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>
+#include <generated/xe_wa_oob.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
@@ -20,9 +23,11 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
+#include "xe_gt_clock.h"
#include "xe_gt_printk.h"
#include "xe_gt_mcr.h"
#include "xe_gt_topology.h"
+#include "xe_guc_capture.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
@@ -295,7 +300,7 @@ void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
reg.addr += hwe->mmio_base;
- xe_mmio_write32(hwe->gt, reg, val);
+ xe_mmio_write32(&hwe->gt->mmio, reg, val);
}
/**
@@ -315,23 +320,26 @@ u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
reg.addr += hwe->mmio_base;
- return xe_mmio_read32(hwe->gt, reg);
+ return xe_mmio_read32(&hwe->gt->mmio, reg);
}
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
u32 ccs_mask =
xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
+ u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
- xe_mmio_write32(hwe->gt, RCU_MODE,
+ xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
xe_bo_ggtt_addr(hwe->hwsp));
- xe_hw_engine_mmio_write32(hwe, RING_MODE(0),
- _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
+
+ if (xe_device_has_msix(gt_to_xe(hwe->gt)))
+ ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
_MASKED_BIT_DISABLE(STOP_RING));
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
@@ -354,7 +362,7 @@ static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
hwe->class != XE_ENGINE_CLASS_RENDER)
return false;
- return xe_mmio_read32(hwe->gt, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
+ return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
}
void
@@ -381,12 +389,6 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
blit_cctl_val,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- /* Use Fixed slice CCS mode */
- { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
- XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
- XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
- RCU_MODE_FIXED_SLICE_CCS_MODE))
- },
/* Disable WMTP if HW doesn't support it */
{ XE_RTP_NAME("DISABLE_WMTP_ON_UNSUPPORTED_HW"),
XE_RTP_RULES(FUNC(xe_rtp_cfeg_wmtp_disabled)),
@@ -395,10 +397,9 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
PREEMPT_GPGPU_THREAD_GROUP_LEVEL)),
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE)
},
- {}
};
- xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
}
static void
@@ -417,7 +418,7 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
* Bspec: 72161
*/
const u8 mocs_write_idx = gt->mocs.uc_index;
- const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE &&
+ const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
(GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) ?
gt->mocs.wb_index : gt->mocs.uc_index;
u32 ring_cmd_cctl_val = REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, mocs_write_idx) |
@@ -454,10 +455,39 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe)
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ,
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- {}
+ /* Use Fixed slice CCS mode */
+ { XE_RTP_NAME("RCU_MODE_FIXED_SLICE_CCS_MODE"),
+ XE_RTP_RULES(FUNC(xe_hw_engine_match_fixed_cslice_mode)),
+ XE_RTP_ACTIONS(FIELD_SET(RCU_MODE, RCU_MODE_FIXED_SLICE_CCS_MODE,
+ RCU_MODE_FIXED_SLICE_CCS_MODE))
+ },
};
- xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
+}
+
+static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance)
+{
+ const struct engine_info *info;
+ enum xe_hw_engine_id id;
+
+ for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+ info = &engine_infos[id];
+ if (info->class == class && info->instance == instance)
+ return info;
+ }
+
+ return NULL;
+}
+
+static u16 get_msix_irq_offset(struct xe_gt *gt, enum xe_engine_class class)
+{
+	/* For MSI-X, hw engines report interrupts at the offset of engine instance zero */
+ const struct engine_info *info = find_engine_info(class, 0);
+
+ xe_gt_assert(gt, info);
+
+ return info ? info->irq_offset : 0;
}
static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
@@ -479,7 +509,9 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
hwe->class = info->class;
hwe->instance = info->instance;
hwe->mmio_base = info->mmio_base;
- hwe->irq_offset = info->irq_offset;
+ hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
+ get_msix_irq_offset(gt, info->class) :
+ info->irq_offset;
hwe->domain = info->domain;
hwe->name = info->name;
hwe->fence_irq = &gt->fence_irq[info->class];
@@ -535,6 +567,33 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_reg_whitelist_process_engine(hwe);
}
+static void adjust_idledly(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ u32 idledly, maxcnt;
+ u32 idledly_units_ps = 8 * gt->info.timestamp_base;
+ u32 maxcnt_units_ns = 640;
+ bool inhibit_switch = 0;
+
+ if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
+ idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
+ maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
+
+ inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED;
+ idledly = REG_FIELD_GET(IDLE_DELAY, idledly);
+ idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000);
+ maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt);
+ maxcnt *= maxcnt_units_ns;
+
+ if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) {
+ idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * maxcnt_units_ns),
+ idledly_units_ps);
+ idledly = DIV_ROUND_CLOSEST(idledly, 1000);
+ xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly);
+ }
+ }
+}
+
static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
enum xe_hw_engine_id id)
{
@@ -546,7 +605,6 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
xe_gt_assert(gt, gt->info.engine_mask & BIT(id));
xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
- xe_reg_sr_apply_whitelist(hwe);
hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
@@ -576,6 +634,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
gt->usm.reserved_bcs_instance = hwe->instance;
+ /* Ensure IDLEDLY is lower than MAXCNT */
+ adjust_idledly(hwe);
+
return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
err_hwsp:
@@ -612,7 +673,7 @@ static void read_media_fuses(struct xe_gt *gt)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);
+ media_fuse = xe_mmio_read32(&gt->mmio, GT_VEBOX_VDBOX_DISABLE);
/*
* Pre-Xe_HP platforms had register bits representing absent engines,
@@ -657,7 +718,7 @@ static void read_copy_fuses(struct xe_gt *gt)
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
+ bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
/* BCS0 is always present; only BCS1-BCS8 may be fused off */
@@ -704,7 +765,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
u32 ccs_mask;
- ccs_mask = xe_mmio_read32(gt, XEHP_FUSE4);
+ ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
ccs_mask = REG_FIELD_GET(CCS_EN_MASK, ccs_mask);
for (int i = XE_HW_ENGINE_CCS0, j = 0; i <= XE_HW_ENGINE_CCS3; ++i, ++j) {
@@ -742,10 +803,10 @@ static void check_gsc_availability(struct xe_gt *gt)
gt->info.engine_mask &= ~BIT(XE_HW_ENGINE_GSCCS0);
/* interrupts were previously enabled, so turn them off */
- xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, 0);
- xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~0);
+ xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
+ xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_info(&xe->drm, "gsccs disabled due to lack of FW\n");
+ drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
}
}
@@ -798,60 +859,10 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
xe_hw_fence_irq_run(hwe->fence_irq);
}
-static bool
-is_slice_common_per_gslice(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) >= 1255;
-}
-
-static void
-xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
- struct xe_hw_engine_snapshot *snapshot)
-{
- struct xe_gt *gt = hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int dss;
- u16 group, instance;
-
- snapshot->reg.instdone.ring = xe_hw_engine_mmio_read32(hwe, RING_INSTDONE(0));
-
- if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
- return;
-
- if (is_slice_common_per_gslice(xe) == false) {
- snapshot->reg.instdone.slice_common[0] =
- xe_mmio_read32(gt, SC_INSTDONE);
- snapshot->reg.instdone.slice_common_extra[0] =
- xe_mmio_read32(gt, SC_INSTDONE_EXTRA);
- snapshot->reg.instdone.slice_common_extra2[0] =
- xe_mmio_read32(gt, SC_INSTDONE_EXTRA2);
- } else {
- for_each_geometry_dss(dss, gt, group, instance) {
- snapshot->reg.instdone.slice_common[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE, group, instance);
- snapshot->reg.instdone.slice_common_extra[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA, group, instance);
- snapshot->reg.instdone.slice_common_extra2[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_SC_INSTDONE_EXTRA2, group, instance);
- }
- }
-
- for_each_geometry_dss(dss, gt, group, instance) {
- snapshot->reg.instdone.sampler[dss] =
- xe_gt_mcr_unicast_read(gt, SAMPLER_INSTDONE, group, instance);
- snapshot->reg.instdone.row[dss] =
- xe_gt_mcr_unicast_read(gt, ROW_INSTDONE, group, instance);
-
- if (GRAPHICS_VERx100(xe) >= 1255)
- snapshot->reg.instdone.geom_svg[dss] =
- xe_gt_mcr_unicast_read(gt, XEHPG_INSTDONE_GEOM_SVGUNIT,
- group, instance);
- }
-}
-
/**
* xe_hw_engine_snapshot_capture - Take a quick snapshot of the HW Engine.
* @hwe: Xe HW Engine.
+ * @q: The exec queue object.
*
* This can be printed out in a later stage like during dev_coredump
* analysis.
@@ -860,11 +871,10 @@ xe_hw_engine_snapshot_instdone_capture(struct xe_hw_engine *hwe,
* caller, using `xe_hw_engine_snapshot_free`.
*/
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
{
struct xe_hw_engine_snapshot *snapshot;
- size_t len;
- u64 val;
+ struct __guc_capture_parsed_output *node;
if (!xe_hw_engine_is_valid(hwe))
return NULL;
@@ -874,28 +884,6 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
if (!snapshot)
return NULL;
- /* Because XE_MAX_DSS_FUSE_BITS is defined in xe_gt_types.h and it
- * includes xe_hw_engine_types.h the length of this 3 registers can't be
- * set in struct xe_hw_engine_snapshot, so here doing additional
- * allocations.
- */
- len = (XE_MAX_DSS_FUSE_BITS * sizeof(u32));
- snapshot->reg.instdone.slice_common = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.slice_common_extra = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.slice_common_extra2 = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.sampler = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.row = kzalloc(len, GFP_ATOMIC);
- snapshot->reg.instdone.geom_svg = kzalloc(len, GFP_ATOMIC);
- if (!snapshot->reg.instdone.slice_common ||
- !snapshot->reg.instdone.slice_common_extra ||
- !snapshot->reg.instdone.slice_common_extra2 ||
- !snapshot->reg.instdone.sampler ||
- !snapshot->reg.instdone.row ||
- !snapshot->reg.instdone.geom_svg) {
- xe_hw_engine_snapshot_free(snapshot);
- return NULL;
- }
-
snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
snapshot->hwe = hwe;
snapshot->logical_instance = hwe->logical_instance;
@@ -903,157 +891,30 @@ xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe)
snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
hwe->domain);
snapshot->mmio_base = hwe->mmio_base;
+ snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);
/* no more VF accessible data below this point */
if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
return snapshot;
- snapshot->reg.ring_execlist_status =
- xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0));
- snapshot->reg.ring_execlist_status |= val << 32;
-
- snapshot->reg.ring_execlist_sq_contents =
- xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_LO(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_EXECLIST_SQ_CONTENTS_HI(0));
- snapshot->reg.ring_execlist_sq_contents |= val << 32;
-
- snapshot->reg.ring_acthd = xe_hw_engine_mmio_read32(hwe, RING_ACTHD(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0));
- snapshot->reg.ring_acthd |= val << 32;
-
- snapshot->reg.ring_bbaddr = xe_hw_engine_mmio_read32(hwe, RING_BBADDR(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0));
- snapshot->reg.ring_bbaddr |= val << 32;
-
- snapshot->reg.ring_dma_fadd =
- xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD(0));
- val = xe_hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0));
- snapshot->reg.ring_dma_fadd |= val << 32;
-
- snapshot->reg.ring_hwstam = xe_hw_engine_mmio_read32(hwe, RING_HWSTAM(0));
- snapshot->reg.ring_hws_pga = xe_hw_engine_mmio_read32(hwe, RING_HWS_PGA(0));
- snapshot->reg.ring_start = xe_hw_engine_mmio_read32(hwe, RING_START(0));
- if (GRAPHICS_VERx100(hwe->gt->tile->xe) >= 2000) {
- val = xe_hw_engine_mmio_read32(hwe, RING_START_UDW(0));
- snapshot->reg.ring_start |= val << 32;
- }
- if (xe_gt_has_indirect_ring_state(hwe->gt)) {
- snapshot->reg.indirect_ring_state =
- xe_hw_engine_mmio_read32(hwe, INDIRECT_RING_STATE(0));
- }
-
- snapshot->reg.ring_head =
- xe_hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR;
- snapshot->reg.ring_tail =
- xe_hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
- snapshot->reg.ring_ctl = xe_hw_engine_mmio_read32(hwe, RING_CTL(0));
- snapshot->reg.ring_mi_mode =
- xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
- snapshot->reg.ring_mode = xe_hw_engine_mmio_read32(hwe, RING_MODE(0));
- snapshot->reg.ring_imr = xe_hw_engine_mmio_read32(hwe, RING_IMR(0));
- snapshot->reg.ring_esr = xe_hw_engine_mmio_read32(hwe, RING_ESR(0));
- snapshot->reg.ring_emr = xe_hw_engine_mmio_read32(hwe, RING_EMR(0));
- snapshot->reg.ring_eir = xe_hw_engine_mmio_read32(hwe, RING_EIR(0));
- snapshot->reg.ipehr = xe_hw_engine_mmio_read32(hwe, RING_IPEHR(0));
- xe_hw_engine_snapshot_instdone_capture(hwe, snapshot);
-
- if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
- snapshot->reg.rcu_mode = xe_mmio_read32(hwe->gt, RCU_MODE);
+ if (q) {
+		/* If a GuC capture is available, use it as the snapshot source */
+ node = xe_guc_capture_get_matching_and_lock(q);
+ if (node) {
+ struct xe_device *xe = gt_to_xe(hwe->gt);
+ struct xe_devcoredump *coredump = &xe->devcoredump;
- return snapshot;
-}
-
-static void
-xe_hw_engine_snapshot_instdone_print(struct xe_hw_engine_snapshot *snapshot, struct drm_printer *p)
-{
- struct xe_gt *gt = snapshot->hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- u16 group, instance;
- unsigned int dss;
-
- drm_printf(p, "\tRING_INSTDONE: 0x%08x\n", snapshot->reg.instdone.ring);
-
- if (snapshot->hwe->class != XE_ENGINE_CLASS_RENDER)
- return;
-
- if (is_slice_common_per_gslice(xe) == false) {
- drm_printf(p, "\tSC_INSTDONE[0]: 0x%08x\n",
- snapshot->reg.instdone.slice_common[0]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA[0]: 0x%08x\n",
- snapshot->reg.instdone.slice_common_extra[0]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA2[0]: 0x%08x\n",
- snapshot->reg.instdone.slice_common_extra2[0]);
- } else {
- for_each_geometry_dss(dss, gt, group, instance) {
- drm_printf(p, "\tSC_INSTDONE[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.slice_common[dss]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.slice_common_extra[dss]);
- drm_printf(p, "\tSC_INSTDONE_EXTRA2[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.slice_common_extra2[dss]);
+ coredump->snapshot.matched_node = node;
+ xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
+ return snapshot;
}
}
- for_each_geometry_dss(dss, gt, group, instance) {
- drm_printf(p, "\tSAMPLER_INSTDONE[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.sampler[dss]);
- drm_printf(p, "\tROW_INSTDONE[%u]: 0x%08x\n", dss,
- snapshot->reg.instdone.row[dss]);
+ /* otherwise, do manual capture */
+ xe_engine_manual_capture(hwe, snapshot);
+ xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
- if (GRAPHICS_VERx100(xe) >= 1255)
- drm_printf(p, "\tINSTDONE_GEOM_SVGUNIT[%u]: 0x%08x\n",
- dss, snapshot->reg.instdone.geom_svg[dss]);
- }
-}
-
-/**
- * xe_hw_engine_snapshot_print - Print out a given Xe HW Engine snapshot.
- * @snapshot: Xe HW Engine snapshot object.
- * @p: drm_printer where it will be printed out.
- *
- * This function prints out a given Xe HW Engine snapshot object.
- */
-void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
- struct drm_printer *p)
-{
- if (!snapshot)
- return;
-
- drm_printf(p, "%s (physical), logical instance=%d\n",
- snapshot->name ? snapshot->name : "",
- snapshot->logical_instance);
- drm_printf(p, "\tForcewake: domain 0x%x, ref %d\n",
- snapshot->forcewake.domain, snapshot->forcewake.ref);
- drm_printf(p, "\tHWSTAM: 0x%08x\n", snapshot->reg.ring_hwstam);
- drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n", snapshot->reg.ring_hws_pga);
- drm_printf(p, "\tRING_EXECLIST_STATUS: 0x%016llx\n",
- snapshot->reg.ring_execlist_status);
- drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS: 0x%016llx\n",
- snapshot->reg.ring_execlist_sq_contents);
- drm_printf(p, "\tRING_START: 0x%016llx\n", snapshot->reg.ring_start);
- drm_printf(p, "\tRING_HEAD: 0x%08x\n", snapshot->reg.ring_head);
- drm_printf(p, "\tRING_TAIL: 0x%08x\n", snapshot->reg.ring_tail);
- drm_printf(p, "\tRING_CTL: 0x%08x\n", snapshot->reg.ring_ctl);
- drm_printf(p, "\tRING_MI_MODE: 0x%08x\n", snapshot->reg.ring_mi_mode);
- drm_printf(p, "\tRING_MODE: 0x%08x\n",
- snapshot->reg.ring_mode);
- drm_printf(p, "\tRING_IMR: 0x%08x\n", snapshot->reg.ring_imr);
- drm_printf(p, "\tRING_ESR: 0x%08x\n", snapshot->reg.ring_esr);
- drm_printf(p, "\tRING_EMR: 0x%08x\n", snapshot->reg.ring_emr);
- drm_printf(p, "\tRING_EIR: 0x%08x\n", snapshot->reg.ring_eir);
- drm_printf(p, "\tACTHD: 0x%016llx\n", snapshot->reg.ring_acthd);
- drm_printf(p, "\tBBADDR: 0x%016llx\n", snapshot->reg.ring_bbaddr);
- drm_printf(p, "\tDMA_FADDR: 0x%016llx\n", snapshot->reg.ring_dma_fadd);
- drm_printf(p, "\tINDIRECT_RING_STATE: 0x%08x\n",
- snapshot->reg.indirect_ring_state);
- drm_printf(p, "\tIPEHR: 0x%08x\n", snapshot->reg.ipehr);
- xe_hw_engine_snapshot_instdone_print(snapshot, p);
-
- if (snapshot->hwe->class == XE_ENGINE_CLASS_COMPUTE)
- drm_printf(p, "\tRCU_MODE: 0x%08x\n",
- snapshot->reg.rcu_mode);
- drm_puts(p, "\n");
+ return snapshot;
}
/**
@@ -1065,15 +926,18 @@ void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
*/
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot)
{
+ struct xe_gt *gt;
if (!snapshot)
return;
- kfree(snapshot->reg.instdone.slice_common);
- kfree(snapshot->reg.instdone.slice_common_extra);
- kfree(snapshot->reg.instdone.slice_common_extra2);
- kfree(snapshot->reg.instdone.sampler);
- kfree(snapshot->reg.instdone.row);
- kfree(snapshot->reg.instdone.geom_svg);
+ gt = snapshot->hwe->gt;
+ /*
+ * xe_guc_capture_put_matched_nodes is called here and from
+ * xe_devcoredump_snapshot_free, to cover the 2 calling paths
+ * of hw_engines - debugfs and devcoredump free.
+ */
+ xe_guc_capture_put_matched_nodes(&gt->uc.guc);
+
kfree(snapshot->name);
kfree(snapshot);
}
@@ -1089,8 +953,8 @@ void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
{
struct xe_hw_engine_snapshot *snapshot;
- snapshot = xe_hw_engine_snapshot_capture(hwe);
- xe_hw_engine_snapshot_print(snapshot, p);
+ snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
+ xe_engine_snapshot_print(snapshot, p);
xe_hw_engine_snapshot_free(snapshot);
}
@@ -1150,7 +1014,7 @@ const char *xe_hw_engine_class_to_str(enum xe_engine_class class)
u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
{
- return xe_mmio_read64_2x32(hwe->gt, RING_TIMESTAMP(hwe->mmio_base));
+ return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
}
enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
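The reworked capture path above prefers a GuC error-capture node matched to the hung exec queue and only falls back to manual MMIO reads when no node is found. A minimal caller sketch, assuming only the interfaces introduced in this patch (the helper name is hypothetical, not from the patch):

static void dump_engine_for_queue(struct xe_hw_engine *hwe,
				  struct xe_exec_queue *q,
				  struct drm_printer *p)
{
	struct xe_hw_engine_snapshot *snapshot;

	/* Passing q lets the capture prefer a matching GuC node; NULL forces manual capture */
	snapshot = xe_hw_engine_snapshot_capture(hwe, q);
	xe_engine_snapshot_print(snapshot, p);
	xe_hw_engine_snapshot_free(snapshot);
}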
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.h b/drivers/gpu/drm/xe/xe_hw_engine.h
index 022819a4a8eb..6b5f9fa2a594 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine.h
@@ -11,6 +11,7 @@
struct drm_printer;
struct drm_xe_engine_class_instance;
struct xe_device;
+struct xe_exec_queue;
#ifdef CONFIG_DRM_XE_JOB_TIMEOUT_MIN
#define XE_HW_ENGINE_JOB_TIMEOUT_MIN CONFIG_DRM_XE_JOB_TIMEOUT_MIN
@@ -54,12 +55,9 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe);
u32 xe_hw_engine_mask_per_class(struct xe_gt *gt,
enum xe_engine_class engine_class);
-
struct xe_hw_engine_snapshot *
-xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe);
+xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q);
void xe_hw_engine_snapshot_free(struct xe_hw_engine_snapshot *snapshot);
-void xe_hw_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot,
- struct drm_printer *p);
void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p);
void xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
index b53e8d2accdb..640950172088 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c
@@ -32,14 +32,61 @@ bool xe_hw_engine_timeout_in_range(u64 timeout, u64 min, u64 max)
return timeout >= min && timeout <= max;
}
-static void kobj_xe_hw_engine_release(struct kobject *kobj)
+static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
{
kfree(kobj);
}
+static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->show(kobj, kattr, buf);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct xe_device *xe = kobj_to_xe(kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store) {
+ xe_pm_runtime_get(xe);
+ ret = kattr->store(kobj, kattr, buf, count);
+ xe_pm_runtime_put(xe);
+ }
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
+ .show = xe_hw_engine_class_sysfs_attr_show,
+ .store = xe_hw_engine_class_sysfs_attr_store,
+};
+
static const struct kobj_type kobj_xe_hw_engine_type = {
- .release = kobj_xe_hw_engine_release,
- .sysfs_ops = &kobj_sysfs_ops
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
+};
+
+static const struct kobj_type kobj_xe_hw_engine_type_def = {
+ .release = xe_hw_engine_sysfs_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
};
static ssize_t job_timeout_max_store(struct kobject *kobj,
@@ -543,7 +590,7 @@ static int xe_add_hw_engine_class_defaults(struct xe_device *xe,
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &kobj_xe_hw_engine_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type_def);
err = kobject_add(kobj, parent, "%s", ".defaults");
if (err)
goto err_object;
@@ -558,58 +605,8 @@ err_object:
kobject_put(kobj);
return err;
}
+ALLOW_ERROR_INJECTION(xe_add_hw_engine_class_defaults, ERRNO); /* See xe_pci_probe() */
-static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj)
-{
- kfree(kobj);
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_show(struct kobject *kobj,
- struct attribute *attr,
- char *buf)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->show) {
- xe_pm_runtime_get(xe);
- ret = kattr->show(kobj, kattr, buf);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static ssize_t xe_hw_engine_class_sysfs_attr_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buf,
- size_t count)
-{
- struct xe_device *xe = kobj_to_xe(kobj);
- struct kobj_attribute *kattr;
- ssize_t ret = -EIO;
-
- kattr = container_of(attr, struct kobj_attribute, attr);
- if (kattr->store) {
- xe_pm_runtime_get(xe);
- ret = kattr->store(kobj, kattr, buf, count);
- xe_pm_runtime_put(xe);
- }
-
- return ret;
-}
-
-static const struct sysfs_ops xe_hw_engine_class_sysfs_ops = {
- .show = xe_hw_engine_class_sysfs_attr_show,
- .store = xe_hw_engine_class_sysfs_attr_store,
-};
-
-static const struct kobj_type xe_hw_engine_sysfs_kobj_type = {
- .release = xe_hw_engine_sysfs_kobj_release,
- .sysfs_ops = &xe_hw_engine_class_sysfs_ops,
-};
static void hw_engine_class_sysfs_fini(void *arg)
{
@@ -640,7 +637,7 @@ int xe_hw_engine_class_sysfs_init(struct xe_gt *gt)
if (!kobj)
return -ENOMEM;
- kobject_init(kobj, &xe_hw_engine_sysfs_kobj_type);
+ kobject_init(kobj, &kobj_xe_hw_engine_type);
err = kobject_add(kobj, gt->sysfs, "engines");
if (err)
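With the engines kobject now initialized with kobj_xe_hw_engine_type, any attribute attached to it has its ->show/->store wrapped in xe_pm_runtime_get()/xe_pm_runtime_put() by the sysfs_ops above. A hypothetical attribute as a sketch (name and value are illustrative only):

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	/* The device is already runtime-resumed by xe_hw_engine_class_sysfs_attr_show() */
	return sysfs_emit(buf, "%d\n", 42);
}
static struct kobj_attribute example_attr = __ATTR_RO(example);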
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 82750520a90a..2d68c5b5262a 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -178,6 +178,7 @@ err_suspend:
up_write(&group->mode_sem);
return err;
}
+ALLOW_ERROR_INJECTION(xe_hw_engine_group_add_exec_queue, ERRNO);
/**
* xe_hw_engine_group_del_exec_queue() - Delete an exec queue from a hw engine group
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 8be6d420ece4..e4191a7a2c31 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -106,7 +106,7 @@ struct xe_hw_engine_class_intf {
* Contains all the hardware engine state for physical instances.
*/
struct xe_hw_engine {
- /** @gt: graphics tile this hw engine belongs to */
+ /** @gt: GT structure this hw engine belongs to */
struct xe_gt *gt;
/** @name: name of this hw engine */
const char *name;
@@ -152,6 +152,11 @@ struct xe_hw_engine {
struct xe_hw_engine_group *hw_engine_group;
};
+enum xe_hw_engine_snapshot_source_id {
+ XE_ENGINE_CAPTURE_SOURCE_MANUAL,
+ XE_ENGINE_CAPTURE_SOURCE_GUC
+};
+
/**
* struct xe_hw_engine_snapshot - Hardware engine snapshot
*
@@ -173,65 +178,8 @@ struct xe_hw_engine_snapshot {
} forcewake;
/** @mmio_base: MMIO base address of this hw engine*/
u32 mmio_base;
- /** @reg: Useful MMIO register snapshot */
- struct {
- /** @reg.ring_execlist_status: RING_EXECLIST_STATUS */
- u64 ring_execlist_status;
- /** @reg.ring_execlist_sq_contents: RING_EXECLIST_SQ_CONTENTS */
- u64 ring_execlist_sq_contents;
- /** @reg.ring_acthd: RING_ACTHD */
- u64 ring_acthd;
- /** @reg.ring_bbaddr: RING_BBADDR */
- u64 ring_bbaddr;
- /** @reg.ring_dma_fadd: RING_DMA_FADD */
- u64 ring_dma_fadd;
- /** @reg.ring_hwstam: RING_HWSTAM */
- u32 ring_hwstam;
- /** @reg.ring_hws_pga: RING_HWS_PGA */
- u32 ring_hws_pga;
- /** @reg.ring_start: RING_START */
- u64 ring_start;
- /** @reg.ring_head: RING_HEAD */
- u32 ring_head;
- /** @reg.ring_tail: RING_TAIL */
- u32 ring_tail;
- /** @reg.ring_ctl: RING_CTL */
- u32 ring_ctl;
- /** @reg.ring_mi_mode: RING_MI_MODE */
- u32 ring_mi_mode;
- /** @reg.ring_mode: RING_MODE */
- u32 ring_mode;
- /** @reg.ring_imr: RING_IMR */
- u32 ring_imr;
- /** @reg.ring_esr: RING_ESR */
- u32 ring_esr;
- /** @reg.ring_emr: RING_EMR */
- u32 ring_emr;
- /** @reg.ring_eir: RING_EIR */
- u32 ring_eir;
- /** @reg.indirect_ring_state: INDIRECT_RING_STATE */
- u32 indirect_ring_state;
- /** @reg.ipehr: IPEHR */
- u32 ipehr;
- /** @reg.rcu_mode: RCU_MODE */
- u32 rcu_mode;
- struct {
- /** @reg.instdone.ring: RING_INSTDONE */
- u32 ring;
- /** @reg.instdone.slice_common: SC_INSTDONE */
- u32 *slice_common;
- /** @reg.instdone.slice_common_extra: SC_INSTDONE_EXTRA */
- u32 *slice_common_extra;
- /** @reg.instdone.slice_common_extra2: SC_INSTDONE_EXTRA2 */
- u32 *slice_common_extra2;
- /** @reg.instdone.sampler: SAMPLER_INSTDONE */
- u32 *sampler;
- /** @reg.instdone.row: ROW_INSTDONE */
- u32 *row;
- /** @reg.instdone.geom_svg: INSTDONE_GEOM_SVGUNIT */
- u32 *geom_svg;
- } instdone;
- } reg;
+ /** @kernel_reserved: Engine reserved, can't be used by userspace */
+ bool kernel_reserved;
};
#endif
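The snapshot no longer embeds raw register values; the source id added above lets consumers tell GuC-provided engine state apart from a manual MMIO capture. A hypothetical helper as a sketch:

static const char *capture_source_name(enum xe_hw_engine_snapshot_source_id id)
{
	/* Label where the captured engine state came from */
	return id == XE_ENGINE_CAPTURE_SOURCE_GUC ? "GuC" : "manual";
}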
diff --git a/drivers/gpu/drm/xe/xe_hw_fence_types.h b/drivers/gpu/drm/xe/xe_hw_fence_types.h
index 364a61f4bfda..58a8d09afe5c 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_fence_types.h
@@ -41,7 +41,7 @@ struct xe_hw_fence_irq {
* to a xe_hw_fence_irq, maintains serial seqno.
*/
struct xe_hw_fence_ctx {
- /** @gt: graphics tile of hardware fence context */
+ /** @gt: GT structure of hardware fence context */
struct xe_gt *gt;
/** @irq: fence irq handler */
struct xe_hw_fence_irq *irq;
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index aa11728e7e79..f008e8049700 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -5,7 +5,9 @@
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
+#include <linux/units.h>
#include <drm/drm_managed.h>
#include "regs/xe_gt_regs.h"
@@ -20,11 +22,13 @@
#include "xe_pm.h"
enum xe_hwmon_reg {
+ REG_TEMP,
REG_PKG_RAPL_LIMIT,
REG_PKG_POWER_SKU,
REG_PKG_POWER_SKU_UNIT,
REG_GT_PERF_STATUS,
REG_PKG_ENERGY_STATUS,
+ REG_FAN_SPEED,
};
enum xe_hwmon_reg_operation {
@@ -36,9 +40,25 @@ enum xe_hwmon_reg_operation {
enum xe_hwmon_channel {
CHANNEL_CARD,
CHANNEL_PKG,
+ CHANNEL_VRAM,
CHANNEL_MAX,
};
+enum xe_fan_channel {
+ FAN_1,
+ FAN_2,
+ FAN_3,
+ FAN_MAX,
+};
+
+/*
+ * For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is
+ * not supported, so the SKU units below are used instead.
+ */
+#define PWR_UNIT 0x3
+#define ENERGY_UNIT 0xe
+#define TIME_UNIT 0xa
+
/*
* SF_* - scale factors for particular quantities according to hwmon spec.
*/
@@ -48,6 +68,18 @@ enum xe_hwmon_channel {
#define SF_ENERGY 1000000 /* microjoules */
#define SF_TIME 1000 /* milliseconds */
+/*
+ * PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
+ */
+#define PL1_HWMON_ATTR hwmon_power_max
+
+#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "Invalid")
+
+/*
+ * Timeout for power limit write mailbox command.
+ */
+#define PL_WRITE_MBX_TIMEOUT_MS (1)
+
/**
* struct xe_hwmon_energy_info - to accumulate energy
*/
@@ -59,6 +91,16 @@ struct xe_hwmon_energy_info {
};
/**
+ * struct xe_hwmon_fan_info - to cache previous fan reading
+ */
+struct xe_hwmon_fan_info {
+ /** @reg_val_prev: previous fan reg val */
+ u32 reg_val_prev;
+ /** @time_prev: previous timestamp */
+ u64 time_prev;
+};
+
+/**
* struct xe_hwmon - xe hwmon data structure
*/
struct xe_hwmon {
@@ -76,38 +118,115 @@ struct xe_hwmon {
int scl_shift_time;
/** @ei: Energy info for energyN_input */
struct xe_hwmon_energy_info ei[CHANNEL_MAX];
+ /** @fi: Fan info for fanN_input */
+ struct xe_hwmon_fan_info fi[FAN_MAX];
+ /** @boot_power_limit_read: is boot power limits read */
+ bool boot_power_limit_read;
+ /** @pl1_on_boot: power limit PL1 on boot */
+ u32 pl1_on_boot[CHANNEL_MAX];
};
+static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
+ u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+ u32 val0 = 0, val1 = 0;
+ int ret = 0;
+
+ ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ READ_PSYSGPU_POWER_LIMIT :
+ READ_PACKAGE_POWER_LIMIT,
+ hwmon->boot_power_limit_read ?
+ READ_PL_FROM_PCODE : READ_PL_FROM_FW),
+ &val0, &val1);
+
+ if (ret) {
+ drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+ *uval = 0;
+ return ret;
+ }
+
+ /* return the value only if limit is enabled */
+ if (attr == PL1_HWMON_ATTR)
+ *uval = (val0 & PWR_LIM_EN) ? val0 : 0;
+ else if (attr == hwmon_power_label)
+ *uval = (val0 & PWR_LIM_EN) ? 1 : 0;
+ else
+ *uval = 0;
+
+ return ret;
+}
+
+static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
+ u32 clr, u32 set)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+ u32 val0, val1;
+ int ret = 0;
+
+ ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ READ_PSYSGPU_POWER_LIMIT :
+ READ_PACKAGE_POWER_LIMIT,
+ hwmon->boot_power_limit_read ?
+ READ_PL_FROM_PCODE : READ_PL_FROM_FW),
+ &val0, &val1);
+
+ if (ret)
+ drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+
+ if (attr == PL1_HWMON_ATTR)
+ val0 = (val0 & ~clr) | set;
+ else
+ return -EIO;
+
+ ret = xe_pcode_write64_timeout(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
+ (channel == CHANNEL_CARD) ?
+ WRITE_PSYSGPU_POWER_LIMIT :
+ WRITE_PACKAGE_POWER_LIMIT, 0),
+ val0, val1, PL_WRITE_MBX_TIMEOUT_MS);
+ if (ret)
+ drm_dbg(&hwmon->xe->drm, "write failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
+ channel, val0, val1, ret);
+ return ret;
+}
+
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
int channel)
{
struct xe_device *xe = hwmon->xe;
switch (hwmon_reg) {
- case REG_PKG_RAPL_LIMIT:
+ case REG_TEMP:
if (xe->info.platform == XE_BATTLEMAGE) {
if (channel == CHANNEL_PKG)
- return BMG_PACKAGE_RAPL_LIMIT;
- else
- return BMG_PLATFORM_POWER_LIMIT;
- } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
+ return BMG_PACKAGE_TEMPERATURE;
+ else if (channel == CHANNEL_VRAM)
+ return BMG_VRAM_TEMPERATURE;
+ } else if (xe->info.platform == XE_DG2) {
+ if (channel == CHANNEL_PKG)
+ return PCU_CR_PACKAGE_TEMPERATURE;
+ else if (channel == CHANNEL_VRAM)
+ return BMG_VRAM_TEMPERATURE;
+ }
+ break;
+ case REG_PKG_RAPL_LIMIT:
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
return PVC_GT0_PACKAGE_RAPL_LIMIT;
- } else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
+ else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
return PCU_CR_PACKAGE_RAPL_LIMIT;
- }
break;
case REG_PKG_POWER_SKU:
- if (xe->info.platform == XE_BATTLEMAGE)
- return BMG_PACKAGE_POWER_SKU;
- else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
return PVC_GT0_PACKAGE_POWER_SKU;
else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
return PCU_CR_PACKAGE_POWER_SKU;
break;
case REG_PKG_POWER_SKU_UNIT:
- if (xe->info.platform == XE_BATTLEMAGE)
- return BMG_PACKAGE_POWER_SKU_UNIT;
- else if (xe->info.platform == XE_PVC)
+ if (xe->info.platform == XE_PVC)
return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
else if (xe->info.platform == XE_DG2)
return PCU_CR_PACKAGE_POWER_SKU_UNIT;
@@ -128,6 +247,14 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return PCU_CR_PACKAGE_ENERGY_STATUS;
}
break;
+ case REG_FAN_SPEED:
+ if (channel == FAN_1)
+ return BMG_FAN_1_SPEED;
+ else if (channel == FAN_2)
+ return BMG_FAN_2_SPEED;
+ else if (channel == FAN_3)
+ return BMG_FAN_3_SPEED;
+ break;
default:
drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
break;
@@ -136,7 +263,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return XE_REG(0);
}
-#define PL1_DISABLE 0
+#define PL_DISABLE 0
/*
* HW allows arbitrary PL1 limits to be set but silently clamps these values to
@@ -144,67 +271,83 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
* same pattern for sysfs, allow arbitrary PL1 limits to be set but display
* clamped values when read.
*/
-static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
+static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
u64 reg_val, min, max;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
- pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ mutex_lock(&hwmon->hwmon_lock);
- /*
- * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
- * So not checking it again here.
- */
- if (!xe_reg_is_valid(pkg_power_sku)) {
- drm_warn(&xe->drm, "pkg_power_sku invalid\n");
- *value = 0;
- return;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
+ } else {
+ rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+
+ /*
+ * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
+ * So not checking it again here.
+ */
+ if (!xe_reg_is_valid(pkg_power_sku)) {
+ drm_warn(&xe->drm, "pkg_power_sku invalid\n");
+ *value = 0;
+ goto unlock;
+ }
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
}
- mutex_lock(&hwmon->hwmon_lock);
-
- reg_val = xe_mmio_read32(mmio, rapl_limit);
- /* Check if PL1 limit is disabled */
- if (!(reg_val & PKG_PWR_LIM_1_EN)) {
- *value = PL1_DISABLE;
+ /* Check if PL limits are disabled. */
+ if (!(reg_val & PWR_LIM_EN)) {
+ *value = PL_DISABLE;
+ drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
+ PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
- reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
+ reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
- min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
- min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
- max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
- max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
-
- if (min && max)
- *value = clamp_t(u64, *value, min, max);
+ /* For platforms with mailbox power limit support clamping would be done by pcode. */
+ if (!hwmon->xe->info.has_mbx_power_limits) {
+ reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
+ max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
+ max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
+ if (min && max)
+ *value = clamp_t(u64, *value, min, max);
+ }
unlock:
mutex_unlock(&hwmon->hwmon_lock);
}
-static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
+static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channel, long value)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
int ret = 0;
- u64 reg_val;
+ u32 reg_val;
struct xe_reg rapl_limit;
+ mutex_lock(&hwmon->hwmon_lock);
+
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
- mutex_lock(&hwmon->hwmon_lock);
+ /* Disable Power Limit and verify, as limit cannot be disabled on all platforms. */
+ if (value == PL_DISABLE) {
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, 0);
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
+ } else {
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
+ }
- /* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
- if (value == PL1_DISABLE) {
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
- reg_val = xe_mmio_read32(mmio, rapl_limit);
- if (reg_val & PKG_PWR_LIM_1_EN) {
- drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
+ if (reg_val & PWR_LIM_EN) {
+ drm_warn(&hwmon->xe->drm, "Power limit disable is not supported!\n");
ret = -EOPNOTSUPP;
}
goto unlock;
@@ -212,26 +355,49 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
- reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
+
+ /*
+ * Clamp the power limit to the card-firmware default as a maximum, as an additional
+ * protection on top of the pcode clamp.
+ */
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ if (reg_val > REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel])) {
+ reg_val = REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel]);
+ drm_dbg(&hwmon->xe->drm, "Clamping power limit to firmware default 0x%x\n",
+ reg_val);
+ }
+ }
+ if (hwmon->xe->info.has_mbx_power_limits)
+ ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
+ else
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
return ret;
}
-static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
+static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, u32 attr, int channel,
+ long *value)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
- struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
- u64 reg_val;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ u32 reg_val;
+
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ /* PL1 is rated max if supported. */
+ xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, channel, &reg_val);
+ } else {
+ /*
+ * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
+ * for this register can be skipped.
+ * See xe_hwmon_power_is_visible.
+ */
+ struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+
+ reg_val = xe_mmio_read32(mmio, reg);
+ }
- /*
- * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
- * for this register can be skipped.
- * See xe_hwmon_power_is_visible.
- */
- reg_val = xe_mmio_read32(mmio, reg);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
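Both the limit and the rated-max reads above scale the raw register field to microwatts with mul_u64_u32_shr(). A worked sketch, assuming the mailbox path where scl_shift_power is PWR_UNIT (0x3), i.e. raw units of 1/8 W (helper name is illustrative):

static u64 raw_power_to_uw(u64 raw, int scl_shift_power)
{
	/* e.g. raw = 1600 with shift 3: 1600 * 1000000 >> 3 = 200000000 uW = 200 W */
	return mul_u64_u32_shr(raw, SF_POWER, scl_shift_power);
}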
@@ -259,7 +425,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
@@ -282,26 +448,38 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
- int sensor_index = to_sensor_dev_attr(attr)->index;
+ int channel = to_sensor_dev_attr(attr)->index;
+ u32 power_attr = PL1_HWMON_ATTR;
+ int ret = 0;
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ if (ret) {
+ drm_err(&hwmon->xe->drm,
+ "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
+ channel, power_attr, r, ret);
+ r = 0;
+ }
+ } else {
+ r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
+ }
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
- x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
- y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
+ x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
+ y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
/*
- * tau = 1.x * power(2,y), x = bits(23:22), y = bits(21:17)
+ * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
* = (4 | x) << (y - 2)
*
* Here (y - 2) ensures a 1.x fixed point representation of 1.x
@@ -323,19 +501,20 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
int ret;
- int sensor_index = to_sensor_dev_attr(attr)->index;
+ int channel = to_sensor_dev_attr(attr)->index;
+ u32 power_attr = PL1_HWMON_ATTR;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
/*
- * Max HW supported tau in '1.x * power(2,y)' format, x = 0, y = 0x12.
+ * Max HW supported tau in '(1 + (x / 4)) * power(2,y)' format, x = 0, y = 0x12.
* The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
*
* The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
@@ -355,11 +534,13 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
- if (val > max_win)
+ if (val > max_win) {
+ drm_warn(&hwmon->xe->drm, "power_interval invalid val 0x%lx\n", val);
return -EINVAL;
+ }
/* val in hw units */
- val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME);
+ val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;
/*
* Convert val to 1.x * power(2,y)
@@ -374,14 +555,18 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
x = (val - (1ul << y)) << x_w >> y;
}
- rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
+ rxy = REG_FIELD_PREP(PWR_LIM_TIME_X, x) |
+ REG_FIELD_PREP(PWR_LIM_TIME_Y, y);
xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
- PKG_PWR_LIM_1_TIME, rxy);
+ if (hwmon->xe->info.has_mbx_power_limits)
+ xe_hwmon_pcode_rmw_power_limit(hwmon, power_attr, channel, PWR_LIM_TIME, rxy);
+ else
+ r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel),
+ PWR_LIM_TIME, rxy);
mutex_unlock(&hwmon->hwmon_lock);
@@ -390,6 +575,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
return count;
}
+/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
xe_hwmon_power_max_interval_show,
xe_hwmon_power_max_interval_store, CHANNEL_CARD);
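The interval shown above is stored as a 1.x * 2^y fixed-point value in the PWR_LIM_TIME_X/Y fields. A worked sketch of the decoding, assuming the default scl_shift_time of 0xa (helper name is illustrative):

static u64 pwr_lim_time_to_msec(u32 x, u32 y, int scl_shift_time)
{
	u64 tau4 = ((1 << 2) | x) << y;	/* x = 1, y = 7 gives (4 | 1) << 7 = 640 */

	/* 640 * 1000 >> (0xa + 2) = 156 ms, i.e. (1 + 1/4) * 2^7 units of 1/1024 s */
	return mul_u64_u32_shr(tau4, SF_TIME, scl_shift_time + 2);
}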
@@ -410,10 +596,19 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
+ int channel = index ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = PL1_HWMON_ATTR;
+ u32 uval;
xe_pm_runtime_get(hwmon->xe);
- ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
+ ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
+ } else {
+ ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel)) ? attr->mode : 0;
+ }
xe_pm_runtime_put(hwmon->xe);
@@ -431,11 +626,14 @@ static const struct attribute_group *hwmon_groups[] = {
};
static const struct hwmon_channel_info * const hwmon_info[] = {
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL,
- HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL),
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
NULL
};
@@ -462,6 +660,19 @@ static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
(uval & POWER_SETUP_I1_DATA_MASK));
}
+static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
+{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ /* Work around platforms that don't report a correct fan count */
+ if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
+ *uval = 2;
+ return 0;
+ }
+
+ return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL);
+}
+
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
@@ -498,7 +709,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
- struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u64 reg_val;
reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
@@ -507,25 +718,63 @@ static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *valu
}
static umode_t
+xe_hwmon_temp_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_TEMP, channel)) ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ u64 reg_val;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_TEMP, channel));
+
+ /* HW register value is in degrees Celsius, convert to millidegrees. */
+ *val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
u32 uval;
switch (attr) {
case hwmon_power_max:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
+ return (uval) ? 0664 : 0;
+ } else {
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
channel)) ? 0664 : 0;
+ }
case hwmon_power_rated_max:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.has_mbx_power_limits)
+ return 0;
+ else
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
+ channel)) ? 0444 : 0;
case hwmon_power_crit:
- if (channel == CHANNEL_PKG)
- return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
- !(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
- break;
case hwmon_power_label:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT,
- channel)) ? 0444 : 0;
+ if (channel == CHANNEL_CARD) {
+ xe_hwmon_pcode_read_i1(hwmon, &uval);
+ return (uval & POWER_SETUP_I1_WATTS) ? (attr == hwmon_power_label) ?
+ 0444 : 0644 : 0;
+ }
+ break;
default:
return 0;
}
@@ -537,10 +786,10 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
- xe_hwmon_power_max_read(hwmon, channel, val);
+ xe_hwmon_power_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_rated_max:
- xe_hwmon_power_rated_max_read(hwmon, channel, val);
+ xe_hwmon_power_rated_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_crit:
return xe_hwmon_power_curr_crit_read(hwmon, channel, val, SF_POWER);
@@ -554,7 +803,7 @@ xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
case hwmon_power_max:
- return xe_hwmon_power_max_write(hwmon, channel, val);
+ return xe_hwmon_power_max_write(hwmon, attr, channel, val);
case hwmon_power_crit:
return xe_hwmon_power_curr_crit_write(hwmon, channel, val, SF_POWER);
default:
@@ -658,6 +907,75 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
}
static umode_t
+xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
+{
+ u32 uval;
+
+ if (!hwmon->xe->info.has_fan_control)
+ return 0;
+
+ switch (attr) {
+ case hwmon_fan_input:
+ if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval))
+ return 0;
+
+ return channel < uval ? 0444 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int
+xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
+ u64 rotations, time_now, time;
+ u32 reg_val;
+ int ret = 0;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
+ time_now = get_jiffies_64();
+
+ /*
+ * HW register value is accumulated count of pulses from PWM fan with the scale
+ * of 2 pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ /*
+ * Calculate fan speed in RPM by time averaging two subsequent readings in minutes.
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+unlock:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static int
+xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ return xe_hwmon_fan_input_read(hwmon, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t
xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -667,6 +985,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
xe_pm_runtime_get(hwmon->xe);
switch (type) {
+ case hwmon_temp:
+ ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
+ break;
case hwmon_power:
ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
break;
@@ -679,6 +1000,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
case hwmon_energy:
ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
+ break;
default:
ret = 0;
break;
@@ -699,6 +1023,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
xe_pm_runtime_get(hwmon->xe);
switch (type) {
+ case hwmon_temp:
+ ret = xe_hwmon_temp_read(hwmon, attr, channel, val);
+ break;
case hwmon_power:
ret = xe_hwmon_power_read(hwmon, attr, channel, val);
break;
@@ -711,6 +1038,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_energy:
ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
break;
+ case hwmon_fan:
+ ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
+ break;
default:
ret = -EOPNOTSUPP;
break;
@@ -752,6 +1082,12 @@ static int xe_hwmon_read_label(struct device *dev,
u32 attr, int channel, const char **str)
{
switch (type) {
+ case hwmon_temp:
+ if (channel == CHANNEL_PKG)
+ *str = "pkg";
+ else if (channel == CHANNEL_VRAM)
+ *str = "vram";
+ return 0;
case hwmon_power:
case hwmon_energy:
case hwmon_curr:
@@ -779,27 +1115,50 @@ static const struct hwmon_chip_info hwmon_chip_info = {
};
static void
-xe_hwmon_get_preregistration_info(struct xe_device *xe)
+xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
- struct xe_hwmon *hwmon = xe->hwmon;
- long energy;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
+ long energy, fan_speed;
u64 val_sku_unit = 0;
int channel;
struct xe_reg pkg_power_sku_unit;
- /*
- * The contents of register PKG_POWER_SKU_UNIT do not change,
- * so read it once and store the shift values.
- */
- pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
- if (xe_reg_is_valid(pkg_power_sku_unit)) {
- val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
- hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
- hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
- hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ /* Check if the card firmware supports mailbox power limit commands. */
+ if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
+ &hwmon->pl1_on_boot[CHANNEL_CARD]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
+ &hwmon->pl1_on_boot[CHANNEL_PKG])) {
+ drm_warn(&hwmon->xe->drm,
+ "Failed to read power limits, check card firmware !\n");
+ } else {
+ drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
+ /* Write default limits to read from pcode from now on. */
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_PKG]);
+ hwmon->scl_shift_power = PWR_UNIT;
+ hwmon->scl_shift_energy = ENERGY_UNIT;
+ hwmon->scl_shift_time = TIME_UNIT;
+ hwmon->boot_power_limit_read = true;
+ }
+ } else {
+ drm_info(&hwmon->xe->drm, "Using register for power limits\n");
+ /*
+ * The contents of register PKG_POWER_SKU_UNIT do not change,
+ * so read it once and store the shift values.
+ */
+ pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
+ if (xe_reg_is_valid(pkg_power_sku_unit)) {
+ val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
+ hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
+ hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
+ hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
+ }
}
-
/*
* Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
* first value of the energy register read
@@ -807,6 +1166,11 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
for (channel = 0; channel < CHANNEL_MAX; channel++)
if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
xe_hwmon_energy_get(hwmon, channel, &energy);
+
+ /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */
+ for (channel = 0; channel < FAN_MAX; channel++)
+ if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel))
+ xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
static void xe_hwmon_mutex_destroy(void *arg)
@@ -816,33 +1180,34 @@ static void xe_hwmon_mutex_destroy(void *arg)
mutex_destroy(&hwmon->hwmon_lock);
}
-void xe_hwmon_register(struct xe_device *xe)
+int xe_hwmon_register(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
struct xe_hwmon *hwmon;
+ int ret;
/* hwmon is available only for dGfx */
if (!IS_DGFX(xe))
- return;
+ return 0;
/* hwmon is not available on VFs */
if (IS_SRIOV_VF(xe))
- return;
+ return 0;
hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
- return;
-
- xe->hwmon = hwmon;
+ return -ENOMEM;
mutex_init(&hwmon->hwmon_lock);
- if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
- return;
+ ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon);
+ if (ret)
+ return ret;
/* There's only one instance of hwmon per device */
hwmon->xe = xe;
+ xe->hwmon = hwmon;
- xe_hwmon_get_preregistration_info(xe);
+ xe_hwmon_get_preregistration_info(hwmon);
drm_dbg(&xe->drm, "Register xe hwmon interface\n");
@@ -850,11 +1215,12 @@ void xe_hwmon_register(struct xe_device *xe)
hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, "xe", hwmon,
&hwmon_chip_info,
hwmon_groups);
-
if (IS_ERR(hwmon->hwmon_dev)) {
- drm_warn(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
+ drm_err(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
xe->hwmon = NULL;
- return;
+ return PTR_ERR(hwmon->hwmon_dev);
}
+
+ return 0;
}
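Fan speed above is derived from the delta of an accumulating pulse counter, at two pulses per rotation. A worked sketch of the RPM math (helper name is hypothetical): 50 new pulses over a 1000 ms window is 25 rotations, i.e. 1500 RPM.

static long fan_pulses_to_rpm(u32 pulse_delta, u64 time_ms)
{
	u64 rotations = pulse_delta / 2;	/* 2 pulses per rotation */

	/* e.g. 25 rotations * 60000 / 1000 ms = 1500 RPM */
	return DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time_ms);
}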
diff --git a/drivers/gpu/drm/xe/xe_hwmon.h b/drivers/gpu/drm/xe/xe_hwmon.h
index c42a1de2cd7a..d02c1bfe8c0a 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.h
+++ b/drivers/gpu/drm/xe/xe_hwmon.h
@@ -11,9 +11,9 @@
struct xe_device;
#if IS_REACHABLE(CONFIG_HWMON)
-void xe_hwmon_register(struct xe_device *xe);
+int xe_hwmon_register(struct xe_device *xe);
#else
-static inline void xe_hwmon_register(struct xe_device *xe) { };
+static inline int xe_hwmon_register(struct xe_device *xe) { return 0; };
#endif
#endif /* _XE_HWMON_H_ */
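With xe_hwmon_register() now returning an int, probe-time callers can propagate registration failures instead of silently continuing. A minimal sketch (the surrounding function is hypothetical):

static int example_probe_step(struct xe_device *xe)
{
	/* 0 on success or when hwmon does not apply (VF, non-dGfx), negative errno otherwise */
	return xe_hwmon_register(xe);
}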
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5f2c368c35ad..5362d3174b06 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -10,8 +10,8 @@
#include <drm/drm_managed.h>
#include "display/xe_display.h"
-#include "regs/xe_gt_regs.h"
-#include "regs/xe_regs.h"
+#include "regs/xe_guc_regs.h"
+#include "regs/xe_irq_regs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gsc_proxy.h"
@@ -20,6 +20,7 @@
#include "xe_hw_engine.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
+#include "xe_pxp.h"
#include "xe_sriov.h"
/*
@@ -30,14 +31,19 @@
#define IIR(offset) XE_REG(offset + 0x8)
#define IER(offset) XE_REG(offset + 0xc)
-static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
+static int xe_irq_msix_init(struct xe_device *xe);
+static void xe_irq_msix_free(struct xe_device *xe);
+static int xe_irq_msix_request_irqs(struct xe_device *xe);
+static void xe_irq_msix_synchronize_irq(struct xe_device *xe);
+
+static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 val = xe_mmio_read32(mmio, reg);
if (val == 0)
return;
- drm_WARN(&gt_to_xe(mmio)->drm, 1,
+ drm_WARN(&mmio->tile->xe->drm, 1,
"Interrupt register 0x%x is not zero: 0x%08x\n",
reg.addr, val);
xe_mmio_write32(mmio, reg, 0xffffffff);
@@ -52,7 +58,7 @@ static void assert_iir_is_zero(struct xe_gt *mmio, struct xe_reg reg)
*/
static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
/*
* If we're just enabling an interrupt now, it shouldn't already
@@ -70,7 +76,7 @@ static void unmask_and_enable(struct xe_tile *tile, u32 irqregs, u32 bits)
/* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
xe_mmio_write32(mmio, IMR(irqregs), ~0);
/* Posting read */
@@ -87,7 +93,7 @@ static void mask_and_disable(struct xe_tile *tile, u32 irqregs)
static u32 xelp_intr_disable(struct xe_device *xe)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
xe_mmio_write32(mmio, GFX_MSTR_IRQ, 0);
@@ -103,7 +109,7 @@ static u32 xelp_intr_disable(struct xe_device *xe)
static u32
gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
u32 iir;
if (!(master_ctl & GU_MISC_IRQ))
@@ -118,7 +124,7 @@ gu_misc_irq_ack(struct xe_device *xe, const u32 master_ctl)
static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
xe_mmio_write32(mmio, GFX_MSTR_IRQ, MASTER_IRQ);
if (stall)
@@ -129,12 +135,13 @@ static inline void xelp_intr_enable(struct xe_device *xe, bool stall)
void xe_irq_enable_hwe(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ struct xe_mmio *mmio = &gt->mmio;
u32 ccs_mask, bcs_mask;
u32 irqs, dmask, smask;
u32 gsc_mask = 0;
u32 heci_mask = 0;
- if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe))
+ if (xe_device_uses_memirq(xe))
return;
if (xe_device_uc_enabled(xe)) {
@@ -155,35 +162,35 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
if (!xe_gt_is_media_type(gt)) {
/* Enable interrupts for each engine class */
- xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask);
+ xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
if (ccs_mask)
- xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask);
+ xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask);
- xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask);
+ xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
+ xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
if (bcs_mask & (BIT(1)|BIT(2)))
- xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(3)|BIT(4)))
- xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(5)|BIT(6)))
- xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(7)|BIT(8)))
- xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
if (ccs_mask & (BIT(0)|BIT(1)))
- xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
if (ccs_mask & (BIT(2)|BIT(3)))
- xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
}
if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
/* Enable interrupts for each engine class */
- xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask);
+ xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask);
- xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
- xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
+ xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
/*
* the heci2 interrupt is enabled via the same register as the
@@ -192,22 +199,31 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
gsc_mask = irqs | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
- } else if (HAS_HECI_GSCFI(xe)) {
+ } else if (xe->info.has_heci_gscfi) {
gsc_mask = GSC_IRQ_INTF(1);
}
if (gsc_mask) {
- xe_mmio_write32(gt, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
- xe_mmio_write32(gt, GUNIT_GSC_INTR_MASK, ~gsc_mask);
+ xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, gsc_mask | heci_mask);
+ xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~gsc_mask);
}
if (heci_mask)
- xe_mmio_write32(gt, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
+ xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~(heci_mask << 16));
+
+ if (xe_pxp_is_supported(xe)) {
+ u32 kcr_mask = KCR_PXP_STATE_TERMINATED_INTERRUPT |
+ KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT |
+ KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT;
+
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, kcr_mask << 16);
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~(kcr_mask << 16));
+ }
}
}
static u32
gt_engine_identity(struct xe_device *xe,
- struct xe_gt *mmio,
+ struct xe_mmio *mmio,
const unsigned int bank,
const unsigned int bit)
{
@@ -279,7 +295,7 @@ static struct xe_gt *pick_engine_gt(struct xe_tile *tile,
return tile->media_gt;
default:
break;
- };
+ }
fallthrough;
default:
return tile->primary_gt;
@@ -291,7 +307,7 @@ static void gt_irq_handler(struct xe_tile *tile,
u32 *identity)
{
struct xe_device *xe = tile_to_xe(tile);
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
unsigned int bank, bit;
u16 instance, intr_vec;
enum xe_engine_class class;
@@ -324,9 +340,15 @@ static void gt_irq_handler(struct xe_tile *tile,
}
if (class == XE_ENGINE_CLASS_OTHER) {
- /* HECI GSCFI interrupts come from outside of GT */
- if (HAS_HECI_GSCFI(xe) && instance == OTHER_GSC_INSTANCE)
+ /*
+ * HECI GSCFI interrupts come from outside of GT.
+ * KCR irqs come from inside GT but are handled
+ * by the global PXP subsystem.
+ */
+ if (xe->info.has_heci_gscfi && instance == OTHER_GSC_INSTANCE)
xe_heci_gsc_irq_handler(xe, intr_vec);
+ else if (instance == OTHER_KCR_INSTANCE)
+ xe_pxp_irq_handler(xe, intr_vec);
else
gt_other_irq_handler(engine_gt, instance, intr_vec);
}
@@ -348,12 +370,8 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
unsigned long intr_dw[2];
u32 identity[32];
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
master_ctl = xelp_intr_disable(xe);
if (!master_ctl) {
@@ -376,7 +394,7 @@ static irqreturn_t xelp_irq_handler(int irq, void *arg)
static u32 dg1_intr_disable(struct xe_device *xe)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
u32 val;
/* First disable interrupts */
@@ -394,7 +412,7 @@ static u32 dg1_intr_disable(struct xe_device *xe)
static void dg1_intr_enable(struct xe_device *xe, bool stall)
{
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
xe_mmio_write32(mmio, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
if (stall)
@@ -417,12 +435,8 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
/* TODO: This really shouldn't be copied+pasted */
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
master_tile_ctl = dg1_intr_disable(xe);
if (!master_tile_ctl) {
@@ -431,7 +445,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
}
for_each_tile(tile, xe, id) {
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
if ((master_tile_ctl & DG1_MSTR_TILE(tile->id)) == 0)
continue;
@@ -459,7 +473,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
* the primary tile.
*/
if (id == 0) {
- if (HAS_HECI_CSCFI(xe))
+ if (xe->info.has_heci_cscfi)
xe_heci_csc_irq_handler(xe, master_ctl);
xe_display_irq_handler(xe, master_ctl);
gu_misc_iir = gu_misc_irq_ack(xe, master_ctl);
@@ -474,7 +488,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void gt_irq_reset(struct xe_tile *tile)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
XE_ENGINE_CLASS_COMPUTE);
@@ -504,14 +518,16 @@ static void gt_irq_reset(struct xe_tile *tile)
if (ccs_mask & (BIT(0)|BIT(1)))
xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0);
if (ccs_mask & (BIT(2)|BIT(3)))
- xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0);
if ((tile->media_gt &&
xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) ||
- HAS_HECI_GSCFI(tile_to_xe(tile))) {
+ tile_to_xe(tile)->info.has_heci_gscfi) {
xe_mmio_write32(mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(mmio, GUNIT_GSC_INTR_MASK, ~0);
xe_mmio_write32(mmio, HECI2_RSVD_INTR_MASK, ~0);
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_ENABLE, 0);
+ xe_mmio_write32(mmio, CRYPTO_RSVD_INTR_MASK, ~0);
}
xe_mmio_write32(mmio, GPM_WGBOXPERF_INTR_ENABLE, 0);
@@ -547,7 +563,7 @@ static void dg1_irq_reset(struct xe_tile *tile)
static void dg1_irq_reset_mstr(struct xe_tile *tile)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
xe_mmio_write32(mmio, GFX_MSTR_IRQ, ~0);
}
@@ -566,7 +582,7 @@ static void vf_irq_reset(struct xe_device *xe)
for_each_tile(tile, xe, id) {
if (xe_device_has_memirq(xe))
- xe_memirq_reset(&tile->sriov.vf.memirq);
+ xe_memirq_reset(&tile->memirq);
else
gt_irq_reset(tile);
}
@@ -580,6 +596,11 @@ static void xe_irq_reset(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_reset(xe);
+ if (xe_device_uses_memirq(xe)) {
+ for_each_tile(tile, xe, id)
+ xe_memirq_reset(&tile->memirq);
+ }
+
for_each_tile(tile, xe, id) {
if (GRAPHICS_VERx100(xe) >= 1210)
dg1_irq_reset(tile);
@@ -609,7 +630,7 @@ static void vf_irq_postinstall(struct xe_device *xe)
for_each_tile(tile, xe, id)
if (xe_device_has_memirq(xe))
- xe_memirq_postinstall(&tile->sriov.vf.memirq);
+ xe_memirq_postinstall(&tile->memirq);
if (GRAPHICS_VERx100(xe) < 1210)
xelp_intr_enable(xe, true);
@@ -622,6 +643,14 @@ static void xe_irq_postinstall(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return vf_irq_postinstall(xe);
+ if (xe_device_uses_memirq(xe)) {
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ xe_memirq_postinstall(&tile->memirq);
+ }
+
xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
/*
@@ -644,15 +673,11 @@ static irqreturn_t vf_mem_irq_handler(int irq, void *arg)
struct xe_tile *tile;
unsigned int id;
- spin_lock(&xe->irq.lock);
- if (!xe->irq.enabled) {
- spin_unlock(&xe->irq.lock);
+ if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;
- }
- spin_unlock(&xe->irq.lock);
for_each_tile(tile, xe, id)
- xe_memirq_handler(&tile->sriov.vf.memirq);
+ xe_memirq_handler(&tile->memirq);
return IRQ_HANDLED;
}
@@ -668,87 +693,105 @@ static irq_handler_t xe_irq_handler(struct xe_device *xe)
return xelp_irq_handler;
}
-static void irq_uninstall(void *arg)
+static int xe_irq_msi_request_irqs(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ irq_handler_t irq_handler;
+ int irq, err;
+
+ irq_handler = xe_irq_handler(xe);
+ if (!irq_handler) {
+ drm_err(&xe->drm, "No supported interrupt handler");
+ return -EINVAL;
+ }
+
+ irq = pci_irq_vector(pdev, 0);
+ err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void xe_irq_msi_free(struct xe_device *xe)
{
- struct xe_device *xe = arg;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int irq;
- if (!xe->irq.enabled)
+ irq = pci_irq_vector(pdev, 0);
+ free_irq(irq, xe);
+}
+
+static void irq_uninstall(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ if (!atomic_xchg(&xe->irq.enabled, 0))
return;
- xe->irq.enabled = false;
xe_irq_reset(xe);
- irq = pci_irq_vector(pdev, 0);
- free_irq(irq, xe);
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_free(xe);
+ else
+ xe_irq_msi_free(xe);
+}
+
+int xe_irq_init(struct xe_device *xe)
+{
+ spin_lock_init(&xe->irq.lock);
+
+ return xe_irq_msix_init(xe);
}
int xe_irq_install(struct xe_device *xe)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- unsigned int irq_flags = PCI_IRQ_MSIX;
- irq_handler_t irq_handler;
- int err, irq, nvec;
-
- irq_handler = xe_irq_handler(xe);
- if (!irq_handler) {
- drm_err(&xe->drm, "No supported interrupt handler");
- return -EINVAL;
- }
+ unsigned int irq_flags = PCI_IRQ_MSI;
+ int nvec = 1;
+ int err;
xe_irq_reset(xe);
- nvec = pci_msix_vec_count(pdev);
- if (nvec <= 0) {
- if (nvec == -EINVAL) {
- /* MSIX capability is not supported in the device, using MSI */
- irq_flags = PCI_IRQ_MSI;
- nvec = 1;
- } else {
- drm_err(&xe->drm, "MSIX: Failed getting count\n");
- return nvec;
- }
+ if (xe_device_has_msix(xe)) {
+ nvec = xe->irq.msix.nvec;
+ irq_flags = PCI_IRQ_MSIX;
}
err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags);
if (err < 0) {
- drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err);
+ drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err);
return err;
}
- irq = pci_irq_vector(pdev, 0);
- err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe);
- if (err < 0) {
- drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err);
+ err = xe_device_has_msix(xe) ? xe_irq_msix_request_irqs(xe) :
+ xe_irq_msi_request_irqs(xe);
+ if (err)
return err;
- }
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_postinstall(xe);
- err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
- if (err)
- goto free_irq_handler;
-
- return 0;
-
-free_irq_handler:
- free_irq(irq, xe);
+ return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
+}
- return err;
+static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
+{
+ synchronize_irq(to_pci_dev(xe->drm.dev)->irq);
}
void xe_irq_suspend(struct xe_device *xe)
{
- int irq = to_pci_dev(xe->drm.dev)->irq;
+ atomic_set(&xe->irq.enabled, 0); /* no new irqs */
- spin_lock_irq(&xe->irq.lock);
- xe->irq.enabled = false; /* no new irqs */
- spin_unlock_irq(&xe->irq.lock);
-
- synchronize_irq(irq); /* flush irqs */
+ /* flush irqs */
+ if (xe_device_has_msix(xe))
+ xe_irq_msix_synchronize_irq(xe);
+ else
+ xe_irq_msi_synchronize_irq(xe);
xe_irq_reset(xe); /* turn irqs off */
}
@@ -762,10 +805,205 @@ void xe_irq_resume(struct xe_device *xe)
* 1. no irq will arrive before the postinstall
* 2. display is not yet resumed
*/
- xe->irq.enabled = true;
+ atomic_set(&xe->irq.enabled, 1);
xe_irq_reset(xe);
xe_irq_postinstall(xe); /* turn irqs on */
for_each_gt(gt, xe, id)
xe_irq_enable_hwe(gt);
}
+
+/* MSI-X related definitions and functions below. */
+
+enum xe_irq_msix_static {
+ GUC2HOST_MSIX = 0,
+ DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX,
+ /* Must be last */
+ NUM_OF_STATIC_MSIX,
+};
+
+static int xe_irq_msix_init(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int nvec = pci_msix_vec_count(pdev);
+
+ if (nvec == -EINVAL)
+ return 0; /* MSI */
+
+ if (nvec < 0) {
+ drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec);
+ return nvec;
+ }
+
+ xe->irq.msix.nvec = nvec;
+ xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC);
+ return 0;
+}
+
+static irqreturn_t guc2host_irq_handler(int irq, void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_tile *tile;
+ u8 id;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, id)
+ xe_guc_irq_handler(&tile->primary_gt->uc.guc,
+ GUC_INTR_GUC2HOST);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
+{
+ unsigned int tile_id, gt_id;
+ struct xe_device *xe = arg;
+ struct xe_memirq *memirq;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+
+ if (!atomic_read(&xe->irq.enabled))
+ return IRQ_NONE;
+
+ for_each_tile(tile, xe, tile_id) {
+ memirq = &tile->memirq;
+ if (!memirq->bo)
+ continue;
+
+ for_each_gt(gt, xe, gt_id) {
+ if (gt->tile != tile)
+ continue;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_memirq_hwe_handler(memirq, hwe);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf,
+ bool dynamic_msix, u16 *msix)
+{
+ struct xa_limit limit;
+ int ret;
+ u32 id;
+
+ limit = (dynamic_msix) ? XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) :
+ XA_LIMIT(*msix, *msix);
+ ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (dynamic_msix)
+ *msix = id;
+
+ return 0;
+}
+
+static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix)
+{
+ xa_erase(&xe->irq.msix.indexes, msix);
+}
+
+static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler,
+ void *irq_buf, const char *name, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret, irq;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0)
+ return irq;
+
+ ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix)
+{
+ int ret;
+
+ ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix);
+ if (ret)
+ return ret;
+
+ ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix);
+ if (ret) {
+ drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix);
+ xe_irq_msix_release_vector(xe, *msix);
+ return ret;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int irq;
+ void *irq_buf;
+
+ irq_buf = xa_load(&xe->irq.msix.indexes, msix);
+ if (!irq_buf)
+ return;
+
+ irq = pci_irq_vector(pdev, msix);
+ if (irq < 0) {
+ drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix);
+ return;
+ }
+
+ free_irq(irq, irq_buf);
+ xe_irq_msix_release_vector(xe, msix);
+}
+
+int xe_irq_msix_request_irqs(struct xe_device *xe)
+{
+ int err;
+ u16 msix;
+
+ msix = GUC2HOST_MSIX;
+ err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+ DRIVER_NAME "-guc2host", false, &msix);
+ if (err)
+ return err;
+
+ msix = DEFAULT_MSIX;
+ err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe,
+ DRIVER_NAME "-default-msix", false, &msix);
+ if (err) {
+ xe_irq_msix_free_irq(xe, GUC2HOST_MSIX);
+ return err;
+ }
+
+ return 0;
+}
+
+void xe_irq_msix_free(struct xe_device *xe)
+{
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ xe_irq_msix_free_irq(xe, msix);
+ xa_destroy(&xe->irq.msix.indexes);
+}
+
+void xe_irq_msix_synchronize_irq(struct xe_device *xe)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ unsigned long msix;
+ u32 *dummy;
+
+ xa_for_each(&xe->irq.msix.indexes, msix, dummy)
+ synchronize_irq(pci_irq_vector(pdev, msix));
+}
diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h
index 067514e13675..a28bd577ba52 100644
--- a/drivers/gpu/drm/xe/xe_irq.h
+++ b/drivers/gpu/drm/xe/xe_irq.h
@@ -6,13 +6,21 @@
#ifndef _XE_IRQ_H_
#define _XE_IRQ_H_
+#include <linux/interrupt.h>
+
+#define XE_IRQ_DEFAULT_MSIX 1
+
struct xe_device;
struct xe_tile;
struct xe_gt;
+int xe_irq_init(struct xe_device *xe);
int xe_irq_install(struct xe_device *xe);
void xe_irq_suspend(struct xe_device *xe);
void xe_irq_resume(struct xe_device *xe);
void xe_irq_enable_hwe(struct xe_gt *gt);
+int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf,
+ const char *name, bool dynamic_msix, u16 *msix);
+void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix);
#endif
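
The xe_irq_msix_request_irq()/xe_irq_msix_free_irq() pair declared above either binds a caller-chosen static vector (dynamic_msix == false, with *msix pre-set) or lets the xarray pick a free vector above the static ones (dynamic_msix == true). A minimal consumer sketch follows; the feature name, handler and helper functions are illustrative assumptions, not part of this patch:

    static irqreturn_t my_feature_irq_handler(int irq, void *arg)
    {
            struct xe_device *xe = arg;

            /* mirror the driver's own handlers: bail out if irqs are disabled */
            if (!atomic_read(&xe->irq.enabled))
                    return IRQ_NONE;

            /* acknowledge and handle the feature-specific interrupt here */
            return IRQ_HANDLED;
    }

    static int my_feature_irq_init(struct xe_device *xe, u16 *msix)
    {
            /* dynamic_msix == true: allocate any free vector after the static ones */
            return xe_irq_msix_request_irq(xe, my_feature_irq_handler, xe,
                                           "xe-my-feature", true, msix);
    }

    static void my_feature_irq_fini(struct xe_device *xe, u16 msix)
    {
            xe_irq_msix_free_irq(xe, msix);
    }
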
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index 8999ac511555..63db66df064b 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -71,7 +71,7 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
lmtt->ops->lmtt_pte_num(level)),
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K | XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_NEEDS_64K);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
@@ -164,7 +164,7 @@ int xe_lmtt_init(struct xe_lmtt *lmtt)
lmtt_assert(lmtt, IS_SRIOV_PF(xe));
lmtt_assert(lmtt, !lmtt->ops);
- if (!IS_DGFX(xe))
+ if (!xe_device_has_lmtt(xe))
return 0;
if (xe_has_multi_level_lmtt(xe))
@@ -193,7 +193,7 @@ static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));
- xe_mmio_write32(tile->primary_gt,
+ xe_mmio_write32(&tile->mmio,
GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
}
@@ -486,7 +486,7 @@ u64 xe_lmtt_estimate_pt_size(struct xe_lmtt *lmtt, u64 size)
u64 pt_size;
lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
- lmtt_assert(lmtt, IS_DGFX(lmtt_to_xe(lmtt)));
+ lmtt_assert(lmtt, xe_device_has_lmtt(lmtt_to_xe(lmtt)));
lmtt_assert(lmtt, lmtt->ops);
pt_size = PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index aec7db39c061..bf7c3981897d 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -24,7 +24,9 @@
#include "xe_hw_fence.h"
#include "xe_map.h"
#include "xe_memirq.h"
+#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_trace_lrc.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -36,26 +38,9 @@
#define LRC_ENGINE_CLASS GENMASK_ULL(63, 61)
#define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48)
+#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
-struct xe_lrc_snapshot {
- struct xe_bo *lrc_bo;
- void *lrc_snapshot;
- unsigned long lrc_size, lrc_offset;
-
- u32 context_desc;
- u32 indirect_context_desc;
- u32 head;
- struct {
- u32 internal;
- u32 memory;
- } tail;
- u32 start_seqno;
- u32 seqno;
- u32 ctx_timestamp;
- u32 ctx_job_timestamp;
-};
-
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
{
@@ -67,19 +52,22 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
struct xe_device *xe = gt_to_xe(gt);
size_t size;
+ /* Per-process HW status page (PPHWSP) */
+ size = LRC_PPHWSP_SIZE;
+
+ /* Engine context image */
switch (class) {
case XE_ENGINE_CLASS_RENDER:
if (GRAPHICS_VER(xe) >= 20)
- size = 4 * SZ_4K;
+ size += 3 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
case XE_ENGINE_CLASS_COMPUTE:
- /* 14 pages since graphics_ver == 11 */
if (GRAPHICS_VER(xe) >= 20)
- size = 3 * SZ_4K;
+ size += 2 * SZ_4K;
else
- size = 14 * SZ_4K;
+ size += 13 * SZ_4K;
break;
default:
WARN(1, "Unknown engine class: %d", class);
@@ -88,7 +76,7 @@ size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
case XE_ENGINE_CLASS_OTHER:
- size = 2 * SZ_4K;
+ size += 1 * SZ_4K;
}
/* Add indirect ring state page */
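
The reworked size calculation above only makes the PPHWSP contribution explicit; the totals are unchanged. For example, a render context on GRAPHICS_VER >= 20 is now LRC_PPHWSP_SIZE + 3 * SZ_4K = 4K + 12K = 16K, exactly the old flat "4 * SZ_4K", and the video/other classes come out to 4K + 1 * 4K = 8K as before, with the indirect ring state page still added on top where supported.
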
@@ -599,10 +587,11 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
{
- struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->sriov.vf.memirq;
+ struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq;
struct xe_device *xe = gt_to_xe(hwe->gt);
+ u8 num_regs;
- if (!IS_SRIOV_VF(xe) || !xe_device_has_memirq(xe))
+ if (!xe_device_uses_memirq(xe))
return;
regs[CTX_LRM_INT_MASK_ENABLE] = MI_LOAD_REGISTER_MEM |
@@ -610,12 +599,18 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr;
regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq);
- regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
+ num_regs = xe_device_has_msix(xe) ? 3 : 2;
+ regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(num_regs) |
MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED;
regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr;
- regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq);
+ regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe);
regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr;
- regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq);
+ regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe);
+
+ if (xe_device_has_msix(xe)) {
+ regs[CTX_CS_INT_VEC_REG] = CS_INT_VEC(0).addr;
+ /* CTX_CS_INT_VEC_DATA will be set in xe_lrc_init */
+ }
}
static int lrc_ring_mi_mode(struct xe_hw_engine *hwe)
@@ -660,7 +655,7 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_PPHWSP_SIZE SZ_4K
+#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -694,7 +689,7 @@ static inline u32 __xe_lrc_start_seqno_offset(struct xe_lrc *lrc)
static u32 __xe_lrc_ctx_job_timestamp_offset(struct xe_lrc *lrc)
{
- /* The start seqno is stored in the driver-defined portion of PPHWSP */
+ /* This is stored in the driver-defined portion of PPHWSP */
return xe_lrc_pphwsp_offset(lrc) + LRC_CTX_JOB_TIMESTAMP_OFFSET;
}
@@ -704,11 +699,21 @@ static inline u32 __xe_lrc_parallel_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PARALLEL_PPHWSP_OFFSET;
}
+static inline u32 __xe_lrc_engine_id_offset(struct xe_lrc *lrc)
+{
+ return xe_lrc_pphwsp_offset(lrc) + LRC_ENGINE_ID_PPHWSP_OFFSET;
+}
+
static u32 __xe_lrc_ctx_timestamp_offset(struct xe_lrc *lrc)
{
return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP * sizeof(u32);
}
+static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc)
+{
+ return __xe_lrc_regs_offset(lrc) + CTX_TIMESTAMP_UDW * sizeof(u32);
+}
+
static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc)
{
/* Indirect ring state page is at the very end of LRC */
@@ -736,8 +741,10 @@ DECL_MAP_ADDR_HELPERS(regs)
DECL_MAP_ADDR_HELPERS(start_seqno)
DECL_MAP_ADDR_HELPERS(ctx_job_timestamp)
DECL_MAP_ADDR_HELPERS(ctx_timestamp)
+DECL_MAP_ADDR_HELPERS(ctx_timestamp_udw)
DECL_MAP_ADDR_HELPERS(parallel)
DECL_MAP_ADDR_HELPERS(indirect_ring)
+DECL_MAP_ADDR_HELPERS(engine_id)
#undef DECL_MAP_ADDR_HELPERS
@@ -753,18 +760,37 @@ u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc)
}
/**
+ * xe_lrc_ctx_timestamp_udw_ggtt_addr() - Get ctx timestamp udw GGTT address
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: ctx timestamp udw GGTT address
+ */
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc)
+{
+ return __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+}
+
+/**
* xe_lrc_ctx_timestamp() - Read ctx timestamp value
* @lrc: Pointer to the lrc.
*
* Returns: ctx timestamp value
*/
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc)
{
struct xe_device *xe = lrc_to_xe(lrc);
struct iosys_map map;
+ u32 ldw, udw = 0;
map = __xe_lrc_ctx_timestamp_map(lrc);
- return xe_map_read32(xe, &map);
+ ldw = xe_map_read32(xe, &map);
+
+ if (xe->info.has_64bit_timestamp) {
+ map = __xe_lrc_ctx_timestamp_udw_map(lrc);
+ udw = xe_map_read32(xe, &map);
+ }
+
+ return (u64)udw << 32 | ldw;
}
/**
@@ -874,7 +900,7 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
- u64 desc = xe_vm_pdp4_descriptor(vm, lrc->tile);
+ u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_UDW, upper_32_bits(desc));
xe_lrc_write_ctx_reg(lrc, CTX_PDP0_LDW, lower_32_bits(desc));
@@ -883,17 +909,88 @@ static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
static void xe_lrc_finish(struct xe_lrc *lrc)
{
xe_hw_fence_ctx_finish(&lrc->fence_ctx);
- xe_bo_lock(lrc->bo, false);
- xe_bo_unpin(lrc->bo);
- xe_bo_unlock(lrc->bo);
- xe_bo_put(lrc->bo);
+ xe_bo_unpin_map_no_vm(lrc->bo);
+ xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo);
+}
+
+/*
+ * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active
+ * context run ticks.
+ * @lrc: Pointer to the lrc.
+ *
+ * Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
+ * context, but only gets updated when the context switches out. In order to
+ * check how long a context has been active before it switches out, two things
+ * are required:
+ *
+ * (1) Determine if the context is running:
+ * To do so, we program the WA BB to set an initial value for CTX_TIMESTAMP in
+ * the LRC. The value chosen is 1 since 0 is the initial value when the LRC is
+ * initialized. During a query, we just check for this value to determine if the
+ * context is active. If the context switched out, it would overwrite this
+ * location with the actual CTX_TIMESTAMP MMIO value. Note that WA BB runs as
+ * the last part of context restore, so reusing this LRC location will not
+ * clobber anything.
+ *
+ * (2) Calculate the time that the context has been active for:
+ * The CTX_TIMESTAMP ticks only when the context is active. If a context is
+ * active, we just use the CTX_TIMESTAMP MMIO as the new value of utilization.
+ * While doing so, we need to read the CTX_TIMESTAMP MMIO for the specific
+ * engine instance. Since we do not know which instance the context is running
+ * on until it is scheduled, we also read the ENGINE_ID MMIO in the WA BB and
+ * store it in the PPHSWP.
+ */
+#define CONTEXT_ACTIVE 1ULL
+static int xe_lrc_setup_utilization(struct xe_lrc *lrc)
+{
+ u32 *cmd, *buf = NULL;
+
+ if (lrc->bb_per_ctx_bo->vmap.is_iomem) {
+ buf = kmalloc(lrc->bb_per_ctx_bo->size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cmd = buf;
+ } else {
+ cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
+ }
+
+ *cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ *cmd++ = ENGINE_ID(0).addr;
+ *cmd++ = __xe_lrc_engine_id_ggtt_addr(lrc);
+ *cmd++ = 0;
+
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = lower_32_bits(CONTEXT_ACTIVE);
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp) {
+ *cmd++ = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
+ *cmd++ = __xe_lrc_ctx_timestamp_udw_ggtt_addr(lrc);
+ *cmd++ = 0;
+ *cmd++ = upper_32_bits(CONTEXT_ACTIVE);
+ }
+
+ *cmd++ = MI_BATCH_BUFFER_END;
+
+ if (buf) {
+ xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bb_per_ctx_bo->vmap, 0,
+ buf, (cmd - buf) * sizeof(*cmd));
+ kfree(buf);
+ }
+
+ xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
+ xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
+
+ return 0;
}
#define PVC_CTX_ASID (0x2e + 1)
#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1)
static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
- struct xe_vm *vm, u32 ring_size)
+ struct xe_vm *vm, u32 ring_size, u16 msix_vec,
+ u32 init_flags)
{
struct xe_gt *gt = hwe->gt;
struct xe_tile *tile = gt_to_tile(gt);
@@ -902,31 +999,42 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
void *init_data = NULL;
u32 arb_enable;
u32 lrc_size;
+ u32 bo_flags;
int err;
kref_init(&lrc->refcount);
+ lrc->gt = gt;
lrc->flags = 0;
lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class);
if (xe_gt_has_indirect_ring_state(gt))
lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE;
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE;
+ if (vm && vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
/*
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
* via VM bind calls.
*/
- lrc->bo = xe_bo_create_pin_map(xe, tile, vm, lrc_size,
+ lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, lrc_size,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
+ lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
+ ttm_bo_type_kernel,
+ bo_flags);
+ if (IS_ERR(lrc->bb_per_ctx_bo)) {
+ err = PTR_ERR(lrc->bb_per_ctx_bo);
+ goto err_lrc_finish;
+ }
+
lrc->size = lrc_size;
- lrc->tile = gt_to_tile(hwe->gt);
lrc->ring.size = ring_size;
lrc->ring.tail = 0;
- lrc->ctx_timestamp = 0;
xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt,
hwe->fence_irq, hwe->name);
@@ -962,6 +1070,14 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
xe_drm_client_add_bo(vm->xef->client, lrc->bo);
}
+ if (xe_device_has_msix(xe)) {
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_STATUS_REPORT_PTR,
+ xe_memirq_status_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_INT_SRC_REPORT_PTR,
+ xe_memirq_source_ptr(&tile->memirq, hwe));
+ xe_lrc_write_ctx_reg(lrc, CTX_CS_INT_VEC_DATA, msix_vec << 16 | msix_vec);
+ }
+
if (xe_gt_has_indirect_ring_state(gt)) {
xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE,
__xe_lrc_indirect_ring_ggtt_addr(lrc));
@@ -981,7 +1097,20 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
}
+ if (init_flags & XE_LRC_CREATE_RUNALONE)
+ xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
+ xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
+ _MASKED_BIT_ENABLE(CTX_CTRL_RUN_ALONE));
+
+ if (init_flags & XE_LRC_CREATE_PXP)
+ xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
+ xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
+ _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+
+ lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0);
if (xe->info.has_asid && vm)
xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid);
@@ -1010,6 +1139,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
+ err = xe_lrc_setup_utilization(lrc);
+ if (err)
+ goto err_lrc_finish;
+
return 0;
err_lrc_finish:
@@ -1022,6 +1155,8 @@ err_lrc_finish:
* @hwe: Hardware Engine
* @vm: The VM (address space)
* @ring_size: LRC ring size
+ * @msix_vec: MSI-X interrupt vector (for platforms that support it)
+ * @flags: LRC initialization flags
*
* Allocate and initialize the Logical Ring Context (LRC).
*
@@ -1029,7 +1164,7 @@ err_lrc_finish:
* upon failure.
*/
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size)
+ u32 ring_size, u16 msix_vec, u32 flags)
{
struct xe_lrc *lrc;
int err;
@@ -1038,7 +1173,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
if (!lrc)
return ERR_PTR(-ENOMEM);
- err = xe_lrc_init(lrc, hwe, vm, ring_size);
+ err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec, flags);
if (err) {
kfree(lrc);
return ERR_PTR(err);
@@ -1078,6 +1213,14 @@ u32 xe_lrc_ring_tail(struct xe_lrc *lrc)
return xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL) & TAIL_ADDR;
}
+static u32 xe_lrc_ring_start(struct xe_lrc *lrc)
+{
+ if (xe_lrc_has_indirect_ring_state(lrc))
+ return xe_lrc_read_indirect_ctx_reg(lrc, INDIRECT_CTX_RING_START);
+ else
+ return xe_lrc_read_ctx_reg(lrc, CTX_RING_START);
+}
+
void xe_lrc_set_ring_head(struct xe_lrc *lrc, u32 head)
{
if (xe_lrc_has_indirect_ring_state(lrc))
@@ -1219,6 +1362,21 @@ struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc)
return __xe_lrc_parallel_map(lrc);
}
+/**
+ * xe_lrc_engine_id() - Read engine id value
+ * @lrc: Pointer to the lrc.
+ *
+ * Returns: engine id value
+ */
+static u32 xe_lrc_engine_id(struct xe_lrc *lrc)
+{
+ struct xe_device *xe = lrc_to_xe(lrc);
+ struct iosys_map map;
+
+ map = __xe_lrc_engine_id_map(lrc);
+ return xe_map_read32(xe, &map);
+}
+
static int instr_dw(u32 cmd_header)
{
/* GFXPIPE "SINGLE_DW" opcodes are a single dword */
@@ -1426,6 +1584,7 @@ static int dump_gfxpipe_command(struct drm_printer *p,
MATCH3D(3DSTATE_CLIP_MESH);
MATCH3D(3DSTATE_SBE_MESH);
MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER);
+ MATCH3D(3DSTATE_COARSE_PIXEL);
MATCH3D(3DSTATE_DRAWING_RECTANGLE);
MATCH3D(3DSTATE_CHROMA_KEY);
@@ -1649,21 +1808,20 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
if (!snapshot)
return NULL;
- if (lrc->bo->vm)
- xe_vm_get(lrc->bo->vm);
-
snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
+ snapshot->ring_addr = __xe_lrc_ring_ggtt_addr(lrc);
snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
snapshot->head = xe_lrc_ring_head(lrc);
snapshot->tail.internal = lrc->ring.tail;
snapshot->tail.memory = xe_lrc_ring_tail(lrc);
+ snapshot->start = xe_lrc_ring_start(lrc);
snapshot->start_seqno = xe_lrc_start_seqno(lrc);
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
snapshot->lrc_snapshot = NULL;
- snapshot->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
return snapshot;
}
@@ -1671,14 +1829,12 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
{
struct xe_bo *bo;
- struct xe_vm *vm;
struct iosys_map src;
if (!snapshot)
return;
bo = snapshot->lrc_bo;
- vm = bo->vm;
snapshot->lrc_bo = NULL;
snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
@@ -1698,8 +1854,6 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
xe_bo_unlock(bo);
put_bo:
xe_bo_put(bo);
- if (vm)
- xe_vm_put(vm);
}
void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
@@ -1710,11 +1864,14 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
return;
drm_printf(p, "\tHW Context Desc: 0x%08x\n", snapshot->context_desc);
+ drm_printf(p, "\tHW Ring address: 0x%08x\n",
+ snapshot->ring_addr);
drm_printf(p, "\tHW Indirect Ring State: 0x%08x\n",
snapshot->indirect_context_desc);
drm_printf(p, "\tLRC Head: (memory) %u\n", snapshot->head);
drm_printf(p, "\tLRC Tail: (internal) %u, (memory) %u\n",
snapshot->tail.internal, snapshot->tail.memory);
+ drm_printf(p, "\tRing start: (memory) 0x%08x\n", snapshot->start);
drm_printf(p, "\tStart seqno: (memory) %d\n", snapshot->start_seqno);
drm_printf(p, "\tSeqno: (memory) %d\n", snapshot->seqno);
drm_printf(p, "\tTimestamp: 0x%08x\n", snapshot->ctx_timestamp);
@@ -1749,32 +1906,94 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
return;
kvfree(snapshot->lrc_snapshot);
- if (snapshot->lrc_bo) {
- struct xe_vm *vm;
-
- vm = snapshot->lrc_bo->vm;
+ if (snapshot->lrc_bo)
xe_bo_put(snapshot->lrc_bo);
- if (vm)
- xe_vm_put(vm);
- }
+
kfree(snapshot);
}
+static int get_ctx_timestamp(struct xe_lrc *lrc, u32 engine_id, u64 *reg_ctx_ts)
+{
+ u16 class = REG_FIELD_GET(ENGINE_CLASS_ID, engine_id);
+ u16 instance = REG_FIELD_GET(ENGINE_INSTANCE_ID, engine_id);
+ struct xe_hw_engine *hwe;
+ u64 val;
+
+ hwe = xe_gt_hw_engine(lrc->gt, class, instance, false);
+ if (xe_gt_WARN_ONCE(lrc->gt, !hwe || xe_hw_engine_is_reserved(hwe),
+ "Unexpected engine class:instance %d:%d for context utilization\n",
+ class, instance))
+ return -1;
+
+ if (lrc_to_xe(lrc)->info.has_64bit_timestamp)
+ val = xe_mmio_read64_2x32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+ else
+ val = xe_mmio_read32(&hwe->gt->mmio,
+ RING_CTX_TIMESTAMP(hwe->mmio_base));
+
+ *reg_ctx_ts = val;
+
+ return 0;
+}
+
/**
* xe_lrc_update_timestamp() - Update ctx timestamp
* @lrc: Pointer to the lrc.
* @old_ts: Old timestamp value
*
* Populate @old_ts current saved ctx timestamp, read new ctx timestamp and
- * update saved value.
+ * update saved value. With support for sampling active contexts, the read may
+ * race with a context switch, so a read-again check is used to make sure the
+ * context is still active before deciding which timestamp to return.
*
* Returns: New ctx timestamp value
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts)
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts)
{
+ u64 lrc_ts, reg_ts;
+ u32 engine_id;
+
*old_ts = lrc->ctx_timestamp;
- lrc->ctx_timestamp = xe_lrc_ctx_timestamp(lrc);
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ /* CTX_TIMESTAMP mmio read is invalid on VF, so return the LRC value */
+ if (IS_SRIOV_VF(lrc_to_xe(lrc))) {
+ lrc->ctx_timestamp = lrc_ts;
+ goto done;
+ }
+
+ if (lrc_ts == CONTEXT_ACTIVE) {
+ engine_id = xe_lrc_engine_id(lrc);
+ if (!get_ctx_timestamp(lrc, engine_id, &reg_ts))
+ lrc->ctx_timestamp = reg_ts;
+
+ /* read lrc again to ensure context is still active */
+ lrc_ts = xe_lrc_ctx_timestamp(lrc);
+ }
+
+ /*
+ * If the context switched out, just use lrc_ts. Note that this must be a
+ * separate if condition rather than an else: the context may have switched
+ * out between reading the MMIO timestamp and the LRC re-read above.
+ */
+ if (lrc_ts != CONTEXT_ACTIVE)
+ lrc->ctx_timestamp = lrc_ts;
+
+done:
+ trace_xe_lrc_update_timestamp(lrc, *old_ts);
return lrc->ctx_timestamp;
}
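
A minimal usage sketch for the reworked helper, assuming a caller that accumulates active run ticks; the accumulator name is a placeholder, not an identifier from this patch:

    u64 old_ts, new_ts;

    /* refreshes lrc->ctx_timestamp and reports the previously saved value */
    new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
    total_active_ticks += new_ts - old_ts;
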
+
+/**
+ * xe_lrc_ring_is_idle() - LRC is idle
+ * @lrc: Pointer to the lrc.
+ *
+ * Compare LRC ring head and tail to determine if idle.
+ *
+ * Return: True if the ring is idle, False otherwise
+ */
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc)
+{
+ return xe_lrc_ring_head(lrc) == xe_lrc_ring_tail(lrc);
+}
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index c24542e89318..eb6e8de8c939 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -17,13 +17,35 @@ enum xe_engine_class;
struct xe_gt;
struct xe_hw_engine;
struct xe_lrc;
-struct xe_lrc_snapshot;
struct xe_vm;
-#define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4)
-
+struct xe_lrc_snapshot {
+ struct xe_bo *lrc_bo;
+ void *lrc_snapshot;
+ unsigned long lrc_size, lrc_offset;
+
+ u32 context_desc;
+ u32 ring_addr;
+ u32 indirect_context_desc;
+ u32 head;
+ u32 start;
+ struct {
+ u32 internal;
+ u32 memory;
+ } tail;
+ u32 start_seqno;
+ u32 seqno;
+ u32 ctx_timestamp;
+ u32 ctx_job_timestamp;
+};
+
+#define LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR (0x34 * 4)
+#define LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR (0x40 * 4)
+
+#define XE_LRC_CREATE_RUNALONE 0x1
+#define XE_LRC_CREATE_PXP 0x2
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
- u32 ring_size);
+ u32 ring_size, u16 msix_vec, u32 flags);
void xe_lrc_destroy(struct kref *ref);
/**
@@ -61,6 +83,8 @@ u32 xe_lrc_ring_head(struct xe_lrc *lrc);
u32 xe_lrc_ring_space(struct xe_lrc *lrc);
void xe_lrc_write_ring(struct xe_lrc *lrc, const void *data, size_t size);
+bool xe_lrc_ring_is_idle(struct xe_lrc *lrc);
+
u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
u32 *xe_lrc_regs(struct xe_lrc *lrc);
@@ -96,7 +120,8 @@ void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer
void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot);
u32 xe_lrc_ctx_timestamp_ggtt_addr(struct xe_lrc *lrc);
-u32 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
+u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
+u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
@@ -112,6 +137,6 @@ u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
*
* Returns the current LRC timestamp
*/
-u32 xe_lrc_update_timestamp(struct xe_lrc *lrc, u32 *old_ts);
+u64 xe_lrc_update_timestamp(struct xe_lrc *lrc, u64 *old_ts);
#endif
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index 71ecb453f811..ae24cf6f8dd9 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -25,8 +25,8 @@ struct xe_lrc {
/** @size: size of lrc including any indirect ring state page */
u32 size;
- /** @tile: tile which this LRC belongs to */
- struct xe_tile *tile;
+ /** @gt: gt which this LRC belongs to */
+ struct xe_gt *gt;
/** @flags: LRC flags */
#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1
@@ -52,7 +52,10 @@ struct xe_lrc {
struct xe_hw_fence_ctx fence_ctx;
/** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */
- u32 ctx_timestamp;
+ u64 ctx_timestamp;
+
+ /** @bb_per_ctx_bo: buffer object for per context batch wa buffer */
+ struct xe_bo *bb_per_ctx_bo;
};
struct xe_lrc_snapshot;
diff --git a/drivers/gpu/drm/xe/xe_macros.h b/drivers/gpu/drm/xe/xe_macros.h
index daf56c846d03..8a77c2423555 100644
--- a/drivers/gpu/drm/xe/xe_macros.h
+++ b/drivers/gpu/drm/xe/xe_macros.h
@@ -10,9 +10,13 @@
#define XE_WARN_ON WARN_ON
-#define XE_IOCTL_DBG(xe, cond) \
- ((cond) && (drm_dbg(&(xe)->drm, \
- "Ioctl argument check failed at %s:%d: %s", \
- __FILE__, __LINE__, #cond), 1))
+#define XE_IOCTL_DBG(xe, cond) ({ \
+ int cond__ = !!(cond); \
+ if (cond__) \
+ drm_dbg(&(xe)->drm, \
+ "Ioctl argument check failed at %s:%d: %s", \
+ __FILE__, __LINE__, #cond); \
+ cond__; \
+})
#endif
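
The rewritten macro evaluates cond exactly once and yields a plain int, so the existing call pattern keeps working. A hedged usage sketch; args and VALID_FLAGS are placeholders, not identifiers from this patch:

    if (XE_IOCTL_DBG(xe, args->flags & ~VALID_FLAGS))
            return -EINVAL;
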
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 95b6e9d7b7db..49c45ec3e83c 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -5,8 +5,8 @@
#include <drm/drm_managed.h>
-#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
+#include "regs/xe_irq_regs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
@@ -19,15 +19,25 @@
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_memirq.h"
-#include "xe_sriov.h"
-#include "xe_sriov_printk.h"
#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition)
-#define memirq_debug(m, msg...) xe_sriov_dbg_verbose(memirq_to_xe(m), "MEMIRQ: " msg)
+#define memirq_printk(m, _level, _fmt, ...) \
+ drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt, \
+ memirq_to_tile(m)->id, ##__VA_ARGS__)
+
+#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ
+#define memirq_debug(m, _fmt, ...) memirq_printk(m, dbg, _fmt, ##__VA_ARGS__)
+#else
+#define memirq_debug(...)
+#endif
+
+#define memirq_err(m, _fmt, ...) memirq_printk(m, err, _fmt, ##__VA_ARGS__)
+#define memirq_err_ratelimited(m, _fmt, ...) \
+ memirq_printk(m, err_ratelimited, _fmt, ##__VA_ARGS__)
static struct xe_tile *memirq_to_tile(struct xe_memirq *memirq)
{
- return container_of(memirq, struct xe_tile, sriov.vf.memirq);
+ return container_of(memirq, struct xe_tile, memirq);
}
static struct xe_device *memirq_to_xe(struct xe_memirq *memirq)
@@ -76,7 +86,7 @@ static const char *guc_name(struct xe_guc *guc)
* This object needs to be 4KiB aligned.
*
* - _`Interrupt Source Report Page`: this is the equivalent of the
- * GEN11_GT_INTR_DWx registers, with each bit in those registers being
+ * GT_INTR_DWx registers, with each bit in those registers being
* mapped to a byte here. The offsets are the same, just bytes instead
* of bits. This object needs to be cacheline aligned.
*
@@ -105,33 +115,74 @@ static const char *guc_name(struct xe_guc *guc)
* | |
* | |
* +-----------+
+ *
+ *
+ * MSI-X use case
+ *
+ * When using MSI-X, hw engines report interrupt status and source to engine
+ * instance 0. For this scenario, in order to differentiate between the
+ * engines, we need to pass different status/source pointers in the LRC.
+ *
+ * The requirements on those pointers are:
+ * - Interrupt status should be 4KiB aligned
+ * - Interrupt source should be 64 bytes aligned
+ *
+ * To accommodate this, we duplicate the memirq page layout above -
+ * allocating a page for each engine instance and pass this page in the LRC.
+ * Note that the same page can be reused for different engine types.
+ * For example, an LRC executing on CCS #x will have pointers to page #x,
+ * and an LRC executing on BCS #x will have the same pointers.
+ *
+ * ::
+ *
+ * 0x0000 +==============================+ <== page for instance 0 (BCS0, CCS0, etc.)
+ * | Interrupt Status Report Page |
+ * 0x0400 +==============================+
+ * | Interrupt Source Report Page |
+ * 0x0440 +==============================+
+ * | Interrupt Enable Mask |
+ * +==============================+
+ * | Not used |
+ * 0x1000 +==============================+ <== page for instance 1 (BCS1, CCS1, etc.)
+ * | Interrupt Status Report Page |
+ * 0x1400 +==============================+
+ * | Interrupt Source Report Page |
+ * 0x1440 +==============================+
+ * | Not used |
+ * 0x2000 +==============================+ <== page for instance 2 (BCS2, CCS2, etc.)
+ * | ... |
+ * +==============================+
+ *
*/
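
As a quick worked example of the per-instance layout described above, using the XE_MEMIRQ_*_OFFSET() macros reworked later in this patch, an engine with instance == 2 resolves to:

    XE_MEMIRQ_STATUS_OFFSET(2)   /* 2 * SZ_4K + 0x0   = 0x2000 */
    XE_MEMIRQ_SOURCE_OFFSET(2)   /* 2 * SZ_4K + 0x400 = 0x2400 */
    XE_MEMIRQ_ENABLE_OFFSET      /* 0x440, present in the instance 0 page only */

all within the single BO that memirq_alloc_pages() sizes to XE_HW_ENGINE_MAX_INSTANCE * SZ_4K when MSI-X is in use.
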
-static void __release_xe_bo(struct drm_device *drm, void *arg)
+static inline bool hw_reports_to_instance_zero(struct xe_memirq *memirq)
{
- struct xe_bo *bo = arg;
-
- xe_bo_unpin_map_no_vm(bo);
+ /*
+ * When the HW engines are configured to use MSI-X,
+ * they report interrupt status and source to the offset of
+ * engine instance 0.
+ */
+ return xe_device_has_msix(memirq_to_xe(memirq));
}
static int memirq_alloc_pages(struct xe_memirq *memirq)
{
struct xe_device *xe = memirq_to_xe(memirq);
struct xe_tile *tile = memirq_to_tile(memirq);
+ size_t bo_size = hw_reports_to_instance_zero(memirq) ?
+ XE_HW_ENGINE_MAX_INSTANCE * SZ_4K : SZ_4K;
struct xe_bo *bo;
int err;
- BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET, SZ_64));
- BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET, SZ_4K));
-
- /* XXX: convert to managed bo */
- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE |
- XE_BO_FLAG_NEEDS_UC |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_SOURCE_OFFSET(0), SZ_64));
+ BUILD_BUG_ON(!IS_ALIGNED(XE_MEMIRQ_STATUS_OFFSET(0), SZ_4K));
+
+ bo = xe_managed_bo_create_pin_map(xe, tile, bo_size,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_NEEDS_UC |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out;
@@ -140,25 +191,25 @@ static int memirq_alloc_pages(struct xe_memirq *memirq)
memirq_assert(memirq, !xe_bo_is_vram(bo));
memirq_assert(memirq, !memirq->bo);
- iosys_map_memset(&bo->vmap, 0, 0, SZ_4K);
+ iosys_map_memset(&bo->vmap, 0, 0, bo_size);
memirq->bo = bo;
- memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET);
- memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET);
+ memirq->source = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_SOURCE_OFFSET(0));
+ memirq->status = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_STATUS_OFFSET(0));
memirq->mask = IOSYS_MAP_INIT_OFFSET(&bo->vmap, XE_MEMIRQ_ENABLE_OFFSET);
memirq_assert(memirq, !memirq->source.is_iomem);
memirq_assert(memirq, !memirq->status.is_iomem);
memirq_assert(memirq, !memirq->mask.is_iomem);
- memirq_debug(memirq, "page offsets: source %#x status %#x\n",
- xe_memirq_source_ptr(memirq), xe_memirq_status_ptr(memirq));
+ memirq_debug(memirq, "page offsets: bo %#x bo_size %zu source %#x status %#x\n",
+ xe_bo_ggtt_addr(bo), bo_size, XE_MEMIRQ_SOURCE_OFFSET(0),
+ XE_MEMIRQ_STATUS_OFFSET(0));
- return drmm_add_action_or_reset(&xe->drm, __release_xe_bo, memirq->bo);
+ return 0;
out:
- xe_sriov_err(memirq_to_xe(memirq),
- "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
+ memirq_err(memirq, "Failed to allocate memirq page (%pe)\n", ERR_PTR(err));
return err;
}
@@ -178,9 +229,7 @@ static void memirq_set_enable(struct xe_memirq *memirq, bool enable)
*
* These allocations are managed and will be implicitly released on unload.
*
- * Note: This function shall be called only by the VF driver.
- *
- * If this function fails then VF driver won't be able to operate correctly.
+ * If this function fails then the driver won't be able to operate correctly.
* If `Memory Based Interrupts`_ are not used this function will return 0.
*
* Return: 0 on success or a negative error code on failure.
@@ -190,9 +239,7 @@ int xe_memirq_init(struct xe_memirq *memirq)
struct xe_device *xe = memirq_to_xe(memirq);
int err;
- memirq_assert(memirq, IS_SRIOV_VF(xe));
-
- if (!xe_device_has_memirq(xe))
+ if (!xe_device_uses_memirq(xe))
return 0;
err = memirq_alloc_pages(memirq);
@@ -205,55 +252,70 @@ int xe_memirq_init(struct xe_memirq *memirq)
return 0;
}
+static u32 __memirq_source_page(struct xe_memirq *memirq, u16 instance)
+{
+ memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
+ memirq_assert(memirq, memirq->bo);
+
+ instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET(instance);
+}
+
/**
* xe_memirq_source_ptr - Get GGTT's offset of the `Interrupt Source Report Page`_.
* @memirq: the &xe_memirq to query
+ * @hwe: the hw engine for which we want the report page
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: GGTT's offset of the `Interrupt Source Report Page`_.
*/
-u32 xe_memirq_source_ptr(struct xe_memirq *memirq)
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
+
+ return __memirq_source_page(memirq, hwe->instance);
+}
+
+static u32 __memirq_status_page(struct xe_memirq *memirq, u16 instance)
+{
+ memirq_assert(memirq, instance <= XE_HW_ENGINE_MAX_INSTANCE);
memirq_assert(memirq, memirq->bo);
- return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_SOURCE_OFFSET;
+ instance = hw_reports_to_instance_zero(memirq) ? instance : 0;
+ return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET(instance);
}
/**
* xe_memirq_status_ptr - Get GGTT's offset of the `Interrupt Status Report Page`_.
* @memirq: the &xe_memirq to query
+ * @hwe: the hw engine for which we want the report page
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: GGTT's offset of the `Interrupt Status Report Page`_.
*/
-u32 xe_memirq_status_ptr(struct xe_memirq *memirq)
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
- memirq_assert(memirq, memirq->bo);
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
- return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_STATUS_OFFSET;
+ return __memirq_status_page(memirq, hwe->instance);
}
/**
* xe_memirq_enable_ptr - Get GGTT's offset of the Interrupt Enable Mask.
* @memirq: the &xe_memirq to query
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: GGTT's offset of the Interrupt Enable Mask.
*/
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
memirq_assert(memirq, memirq->bo);
return xe_bo_ggtt_addr(memirq->bo) + XE_MEMIRQ_ENABLE_OFFSET;
@@ -267,7 +329,7 @@ u32 xe_memirq_enable_ptr(struct xe_memirq *memirq)
* Register `Interrupt Source Report Page`_ and `Interrupt Status Report Page`_
* to be used by the GuC when `Memory Based Interrupts`_ are required.
*
- * Shall be called only on VF driver when `Memory Based Interrupts`_ are used
+ * Shall be called when `Memory Based Interrupts`_ are used
* and xe_memirq_init() didn't fail.
*
* Return: 0 on success or a negative error code on failure.
@@ -279,12 +341,10 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
u32 source, status;
int err;
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
- memirq_assert(memirq, memirq->bo);
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
- source = xe_memirq_source_ptr(memirq) + offset;
- status = xe_memirq_status_ptr(memirq) + offset * SZ_16;
+ source = __memirq_source_page(memirq, 0) + offset;
+ status = __memirq_status_page(memirq, 0) + offset * SZ_16;
err = xe_guc_self_cfg64(guc, GUC_KLV_SELF_CFG_MEMIRQ_SOURCE_ADDR_KEY,
source);
@@ -299,9 +359,8 @@ int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc)
return 0;
failed:
- xe_sriov_err(memirq_to_xe(memirq),
- "Failed to setup report pages in %s (%pe)\n",
- guc_name(guc), ERR_PTR(err));
+ memirq_err(memirq, "Failed to setup report pages in %s (%pe)\n",
+ guc_name(guc), ERR_PTR(err));
return err;
}
@@ -311,13 +370,12 @@ failed:
*
* This is part of the driver IRQ setup flow.
*
- * This function shall only be used by the VF driver on platforms that use
+ * This function shall only be used on platforms that use
* `Memory Based Interrupts`_.
*/
void xe_memirq_reset(struct xe_memirq *memirq)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
if (memirq->bo)
memirq_set_enable(memirq, false);
@@ -329,13 +387,12 @@ void xe_memirq_reset(struct xe_memirq *memirq)
*
* This is part of the driver IRQ setup flow.
*
- * This function shall only be used by the VF driver on platforms that use
+ * This function shall only be used on platforms that use
* `Memory Based Interrupts`_.
*/
void xe_memirq_postinstall(struct xe_memirq *memirq)
{
- memirq_assert(memirq, IS_SRIOV_VF(memirq_to_xe(memirq)));
- memirq_assert(memirq, xe_device_has_memirq(memirq_to_xe(memirq)));
+ memirq_assert(memirq, xe_device_uses_memirq(memirq_to_xe(memirq)));
if (memirq->bo)
memirq_set_enable(memirq, true);
@@ -349,9 +406,9 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
value = iosys_map_rd(vector, offset, u8);
if (value) {
if (value != 0xff)
- xe_sriov_err_ratelimited(memirq_to_xe(memirq),
- "Unexpected memirq value %#x from %s at %u\n",
- value, name, offset);
+ memirq_err_ratelimited(memirq,
+ "Unexpected memirq value %#x from %s at %u\n",
+ value, name, offset);
iosys_map_wr(vector, offset, u8, 0x00);
}
@@ -376,6 +433,31 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
+
+ if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
+ xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
+}
+
+/**
+ * xe_memirq_hwe_handler - Check and process interrupts for a specific HW engine.
+ * @memirq: the &xe_memirq
+ * @hwe: the hw engine to process
+ *
+ * This function reads and dispatches `Memory Based Interrupts` for the provided HW engine.
+ */
+void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
+{
+ u16 offset = hwe->irq_offset;
+ u16 instance = hw_reports_to_instance_zero(memirq) ? hwe->instance : 0;
+ struct iosys_map src_offset = IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
+ XE_MEMIRQ_SOURCE_OFFSET(instance));
+
+ if (memirq_received(memirq, &src_offset, offset, "SRC")) {
+ struct iosys_map status_offset =
+ IOSYS_MAP_INIT_OFFSET(&memirq->bo->vmap,
+ XE_MEMIRQ_STATUS_OFFSET(instance) + offset * SZ_16);
+ memirq_dispatch_engine(memirq, &status_offset, hwe);
+ }
}
/**
@@ -405,13 +487,8 @@ void xe_memirq_handler(struct xe_memirq *memirq)
if (gt->tile != tile)
continue;
- for_each_hw_engine(hwe, gt, id) {
- if (memirq_received(memirq, &memirq->source, hwe->irq_offset, "SRC")) {
- map = IOSYS_MAP_INIT_OFFSET(&memirq->status,
- hwe->irq_offset * SZ_16);
- memirq_dispatch_engine(memirq, &map, hwe);
- }
- }
+ for_each_hw_engine(hwe, gt, id)
+ xe_memirq_hwe_handler(memirq, hwe);
}
/* GuC and media GuC (if present) must be checked separately */
diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
index 2d40d03c3095..06130650e9d6 100644
--- a/drivers/gpu/drm/xe/xe_memirq.h
+++ b/drivers/gpu/drm/xe/xe_memirq.h
@@ -9,16 +9,18 @@
#include <linux/types.h>
struct xe_guc;
+struct xe_hw_engine;
struct xe_memirq;
int xe_memirq_init(struct xe_memirq *memirq);
-u32 xe_memirq_source_ptr(struct xe_memirq *memirq);
-u32 xe_memirq_status_ptr(struct xe_memirq *memirq);
+u32 xe_memirq_source_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe);
+u32 xe_memirq_status_ptr(struct xe_memirq *memirq, struct xe_hw_engine *hwe);
u32 xe_memirq_enable_ptr(struct xe_memirq *memirq);
void xe_memirq_reset(struct xe_memirq *memirq);
void xe_memirq_postinstall(struct xe_memirq *memirq);
+void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe);
void xe_memirq_handler(struct xe_memirq *memirq);
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_memirq_types.h b/drivers/gpu/drm/xe/xe_memirq_types.h
index 625b6b8736cc..9d0f6c1cdb9d 100644
--- a/drivers/gpu/drm/xe/xe_memirq_types.h
+++ b/drivers/gpu/drm/xe/xe_memirq_types.h
@@ -11,9 +11,9 @@
struct xe_bo;
/* ISR */
-#define XE_MEMIRQ_STATUS_OFFSET 0x0
+#define XE_MEMIRQ_STATUS_OFFSET(inst) ((inst) * SZ_4K + 0x0)
/* IIR */
-#define XE_MEMIRQ_SOURCE_OFFSET 0x400
+#define XE_MEMIRQ_SOURCE_OFFSET(inst) ((inst) * SZ_4K + 0x400)
/* IMR */
#define XE_MEMIRQ_ENABLE_OFFSET 0x440
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index cfd31ae49cc1..8f8e9fdfb2a8 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -97,7 +97,7 @@ struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
return tile->migrate->q;
}
-static void xe_migrate_fini(struct drm_device *dev, void *arg)
+static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -209,7 +209,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PAGETABLE);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -400,7 +400,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
struct xe_vm *vm;
int err;
- m = drmm_kzalloc(&xe->drm, sizeof(*m), GFP_KERNEL);
+ m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
if (!m)
return ERR_PTR(-ENOMEM);
@@ -454,7 +454,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
might_lock(&m->job_mutex);
fs_reclaim_release(GFP_KERNEL);
- err = drmm_add_action_or_reset(&xe->drm, xe_migrate_fini, m);
+ err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
return ERR_PTR(err);
@@ -669,6 +669,7 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u32 mocs = 0;
u32 tile_y = 0;
+ xe_gt_assert(gt, !(pitch & 3));
xe_gt_assert(gt, size / pitch <= S16_MAX);
xe_gt_assert(gt, pitch / 4 <= S16_MAX);
xe_gt_assert(gt, pitch <= U16_MAX);
@@ -778,10 +779,12 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
bool dst_is_pltt = dst->mem_type == XE_PL_TT;
bool src_is_vram = mem_type_is_vram(src->mem_type);
bool dst_is_vram = mem_type_is_vram(dst->mem_type);
+ bool type_device = src_bo->ttm.type == ttm_bo_type_device;
+ bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
bool copy_ccs = xe_device_has_flat_ccs(xe) &&
xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
- bool use_comp_pat = xe_device_has_flat_ccs(xe) &&
+ bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;
/* Copying CCS between two different BOs is not supported yet. */
@@ -838,6 +841,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
avail_pts, avail_pts);
if (copy_system_ccs) {
+ xe_assert(xe, type_device);
ccs_size = xe_device_ccs_bytes(xe, src_L0);
batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
&ccs_ofs, &ccs_pt, 0,
@@ -848,7 +852,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
/* Add copy commands size here */
batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
- ((xe_migrate_needs_ccs_emit(xe) ? EMIT_COPY_CCS_DW : 0));
+ ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb)) {
@@ -877,7 +881,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (!copy_only_ccs)
emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);
- if (xe_migrate_needs_ccs_emit(xe))
+ if (needs_ccs_emit)
flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
IS_DGFX(xe) ? src_is_vram : src_is_pltt,
dst_L0_ofs,
@@ -1176,7 +1180,7 @@ err:
err_sync:
/* Sync partial copies if any. FIXME: job_mutex? */
if (fence) {
- dma_fence_wait(m->fence, false);
+ dma_fence_wait(fence, false);
dma_fence_put(fence);
}
@@ -1350,6 +1354,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
+ u16 pat_index = xe->pat.idx[XE_CACHE_WB];
u32 ptes, ofs;
ppgtt_ofs = NUM_KERNEL_PDE - 1;
@@ -1409,7 +1414,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m,
pt_bo->update_index = current_update;
addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
- XE_CACHE_WB, 0);
+ pat_index, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
}
@@ -1504,7 +1509,7 @@ err_bb:
* using the default engine for the updates, they will be performed in the
* order they grab the job_mutex. If different engines are used, external
* synchronization is needed for overlapping updates to maintain page-table
- * consistency. Note that the meaing of "overlapping" is that the updates
+ * consistency. Note that the meaning of "overlapping" is that the updates
* touch the same page-table, which might be a higher-level page-directory.
* If no pipelining is needed, then updates may be performed by the cpu.
*
@@ -1542,6 +1547,372 @@ void xe_migrate_wait(struct xe_migrate *m)
dma_fence_wait(m->fence, false);
}
+static u32 pte_update_cmd_size(u64 size)
+{
+ u32 num_dword;
+ u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);
+
+ XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);
+ /*
+ * The MI_STORE_DATA_IMM command is used to update the page table. Each
+ * instruction can update at most 0x1ff pte entries. To update
+ * n (n <= 0x1ff) pte entries, we need:
+ * 1 dword for the MI_STORE_DATA_IMM command header (opcode etc.)
+ * 2 dwords for the page table's physical location
+ * 2*n dwords for the pte values to fill in (each pte entry is 2 dwords)
+ */
+ num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff);
+ num_dword += entries * 2;
+
+ return num_dword;
+}
+
+static void build_pt_update_batch_sram(struct xe_migrate *m,
+ struct xe_bb *bb, u32 pt_offset,
+ dma_addr_t *sram_addr, u32 size)
+{
+ u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
+ u32 ptes;
+ int i = 0;
+
+ ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ while (ptes) {
+ u32 chunk = min(0x1ffU, ptes);
+
+ bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
+ bb->cs[bb->len++] = pt_offset;
+ bb->cs[bb->len++] = 0;
+
+ pt_offset += chunk * 8;
+ ptes -= chunk;
+
+ while (chunk--) {
+ u64 addr = sram_addr[i++] & PAGE_MASK;
+
+ xe_tile_assert(m->tile, addr);
+ addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, pat_index,
+ 0, false, 0);
+ bb->cs[bb->len++] = lower_32_bits(addr);
+ bb->cs[bb->len++] = upper_32_bits(addr);
+ }
+ }
+}
+
+enum xe_migrate_copy_dir {
+ XE_MIGRATE_COPY_TO_VRAM,
+ XE_MIGRATE_COPY_TO_SRAM,
+};
+
+#define XE_CACHELINE_BYTES 64ull
+#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
+
+static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
+ unsigned long len,
+ unsigned long sram_offset,
+ dma_addr_t *sram_addr, u64 vram_addr,
+ const enum xe_migrate_copy_dir dir)
+{
+ struct xe_gt *gt = m->tile->primary_gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ bool use_usm_batch = xe->info.has_usm;
+ struct dma_fence *fence = NULL;
+ u32 batch_size = 2;
+ u64 src_L0_ofs, dst_L0_ofs;
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 update_idx, pt_slot = 0;
+ unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
+ unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
+ PAGE_SIZE : 4;
+ int err;
+
+ if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
+ (sram_offset | vram_addr) & XE_CACHELINE_MASK))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
+
+ batch_size += pte_update_cmd_size(len);
+ batch_size += EMIT_COPY_DW;
+
+ bb = xe_bb_new(gt, batch_size, use_usm_batch);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ return ERR_PTR(err);
+ }
+
+ build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
+ sram_addr, len + sram_offset);
+
+ if (dir == XE_MIGRATE_COPY_TO_VRAM) {
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
+ dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
+
+ } else {
+ src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
+ }
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);
+
+ job = xe_bb_create_migration_job(m->q, bb,
+ xe_migrate_batch_base(m, use_usm_batch),
+ update_idx);
+ if (IS_ERR(job)) {
+ err = PTR_ERR(job);
+ goto err;
+ }
+
+ xe_sched_job_add_migrate_flush(job, 0);
+
+ mutex_lock(&m->job_mutex);
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+ mutex_unlock(&m->job_mutex);
+
+ xe_bb_free(bb, fence);
+
+ return fence;
+
+err:
+ xe_bb_free(bb, NULL);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_migrate_to_vram() - Migrate to VRAM
+ * @m: The migration context.
+ * @npages: Number of pages to migrate.
+ * @src_addr: Array of dma addresses (source of migrate)
+ * @dst_addr: Device physical address of VRAM (destination of migrate)
+ *
+ * Copy from an array of dma addresses to a VRAM device physical address
+ *
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *src_addr,
+ u64 dst_addr)
+{
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
+ XE_MIGRATE_COPY_TO_VRAM);
+}
+
+/**
+ * xe_migrate_from_vram() - Migrate from VRAM
+ * @m: The migration context.
+ * @npages: Number of pages to migrate.
+ * @src_addr: Device physical address of VRAM (source of migrate)
+ * @dst_addr: Array of dma addresses (destination of migrate)
+ *
+ * Copy from a VRAM device physical address to an array of dma addresses
+ *
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+ dma_addr_t *dst_addr)
+{
+ return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
+ XE_MIGRATE_COPY_TO_SRAM);
+}
+
+static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+ int len, int write)
+{
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ for (i = 0; i < npages; ++i) {
+ if (!dma_addr[i])
+ break;
+
+ dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+ kfree(dma_addr);
+}
+
+static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len, int write)
+{
+ dma_addr_t *dma_addr;
+ unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
+
+ dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
+ if (!dma_addr)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < npages; ++i) {
+ dma_addr_t addr;
+ struct page *page;
+
+ if (is_vmalloc_addr(buf))
+ page = vmalloc_to_page(buf);
+ else
+ page = virt_to_page(buf);
+
+ addr = dma_map_page(xe->drm.dev,
+ page, 0, PAGE_SIZE,
+ write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(xe->drm.dev, addr))
+ goto err_fault;
+
+ dma_addr[i] = addr;
+ buf += PAGE_SIZE;
+ }
+
+ return dma_addr;
+
+err_fault:
+ xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ return ERR_PTR(-EFAULT);
+}
+
+/**
+ * xe_migrate_access_memory() - Access memory of a BO via GPU
+ *
+ * @m: The migration context.
+ * @bo: buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Access memory of a BO via the GPU, either reading into or writing from a
+ * passed-in pointer. The pointer is dma mapped for GPU access and GPU commands
+ * are issued to read into or write from it.
+ *
+ * Returns:
+ * 0 if successful, negative error code on failure.
+ */
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct xe_tile *tile = m->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_res_cursor cursor;
+ struct dma_fence *fence = NULL;
+ dma_addr_t *dma_addr;
+ unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
+ int bytes_left = len, current_page = 0;
+ void *orig_buf = buf;
+
+ xe_bo_assert_held(bo);
+
+ /* Use bounce buffer for small access and unaligned access */
+ if (len & XE_CACHELINE_MASK ||
+ ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
+ int buf_offset = 0;
+
+ /*
+ * Less than ideal for large unaligned access, but this should be
+ * fairly rare; it can be fixed up if it becomes common.
+ */
+ do {
+ u8 bounce[XE_CACHELINE_BYTES];
+ void *ptr = (void *)bounce;
+ int err;
+ int copy_bytes = min_t(int, bytes_left,
+ XE_CACHELINE_BYTES -
+ (offset & XE_CACHELINE_MASK));
+ int ptr_offset = offset & XE_CACHELINE_MASK;
+
+ err = xe_migrate_access_memory(m, bo,
+ offset &
+ ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), 0);
+ if (err)
+ return err;
+
+ if (write) {
+ memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
+
+ err = xe_migrate_access_memory(m, bo,
+ offset & ~XE_CACHELINE_MASK,
+ (void *)ptr,
+ sizeof(bounce), write);
+ if (err)
+ return err;
+ } else {
+ memcpy(buf + buf_offset, ptr + ptr_offset,
+ copy_bytes);
+ }
+
+ bytes_left -= copy_bytes;
+ buf_offset += copy_bytes;
+ offset += copy_bytes;
+ } while (bytes_left);
+
+ return 0;
+ }
+
+ dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(dma_addr))
+ return PTR_ERR(dma_addr);
+
+ xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor);
+
+ do {
+ struct dma_fence *__fence;
+ u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
+ cursor.start;
+ int current_bytes;
+
+ if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
+ current_bytes = min_t(int, bytes_left,
+ MAX_PREEMPTDISABLE_TRANSFER);
+ else
+ current_bytes = min_t(int, bytes_left, cursor.size);
+
+ if (fence)
+ dma_fence_put(fence);
+
+ __fence = xe_migrate_vram(m, current_bytes,
+ (unsigned long)buf & ~PAGE_MASK,
+ dma_addr + current_page,
+ vram_addr, write ?
+ XE_MIGRATE_COPY_TO_VRAM :
+ XE_MIGRATE_COPY_TO_SRAM);
+ if (IS_ERR(__fence)) {
+ if (fence)
+ dma_fence_wait(fence, false);
+ fence = __fence;
+ goto out_err;
+ }
+ fence = __fence;
+
+ buf += current_bytes;
+ offset += current_bytes;
+ current_page = (int)(buf - orig_buf) / PAGE_SIZE;
+ bytes_left -= current_bytes;
+ if (bytes_left)
+ xe_res_next(&cursor, current_bytes);
+ } while (bytes_left);
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+out_err:
+ xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ return IS_ERR(fence) ? PTR_ERR(fence) : 0;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
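
For reference, the dword accounting performed by pte_update_cmd_size() above can be reproduced outside the driver. Below is a minimal standalone userspace sketch of the same arithmetic (1 header dword plus 2 address dwords per MI_STORE_DATA_IMM, 2 dwords per PTE, at most 0x1ff PTEs per command), assuming 4 KiB pages; it is an illustration rather than driver code.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096ull
#define SKETCH_MAX_PTES  0x1ffull	/* PTEs per MI_STORE_DATA_IMM */

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

/* Mirror of the pte_update_cmd_size() accounting */
static uint64_t pte_update_dwords(uint64_t size)
{
	uint64_t entries = div_round_up(size, SKETCH_PAGE_SIZE);
	uint64_t cmds = div_round_up(entries, SKETCH_MAX_PTES);

	/* 1 dword header + 2 dword address per command, 2 dwords per PTE */
	return cmds * (1 + 2) + entries * 2;
}

int main(void)
{
	/* 8 MiB transfer: 2048 PTEs -> 5 commands -> 4111 dwords */
	printf("%llu\n", (unsigned long long)pte_update_dwords(8ull << 20));
	return 0;
}

Similarly, the cacheline chunking used by the bounce-buffer path in xe_migrate_access_memory() can be reproduced in isolation: the first chunk runs up to the next 64-byte boundary, later chunks are full cachelines, and the final chunk takes whatever remains. The offset and length below are made-up example values.

#include <stdio.h>

#define CACHELINE_BYTES	64
#define CACHELINE_MASK	(CACHELINE_BYTES - 1)

int main(void)
{
	unsigned long offset = 100;	/* assumed unaligned BO offset */
	int bytes_left = 200;		/* assumed access length */

	while (bytes_left) {
		int copy_bytes = bytes_left;
		int room = CACHELINE_BYTES - (offset & CACHELINE_MASK);

		if (copy_bytes > room)
			copy_bytes = room;

		printf("copy %3d bytes at offset %lu (cacheline %lu)\n",
		       copy_bytes, offset, offset / CACHELINE_BYTES);

		bytes_left -= copy_bytes;
		offset += copy_bytes;
	}
	return 0;
}
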
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 0109866e398a..fb9839c1bae0 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -95,6 +95,16 @@ struct xe_migrate_pt_update {
struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
+struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
+ unsigned long npages,
+ dma_addr_t *src_addr,
+ u64 dst_addr);
+
+struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
+ unsigned long npages,
+ u64 src_addr,
+ dma_addr_t *dst_addr);
+
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
struct xe_bo *dst_bo,
@@ -102,6 +112,10 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
+ unsigned long offset, void *buf, int len,
+ int write);
+
#define XE_MIGRATE_CLEAR_FLAG_BO_DATA BIT(0)
#define XE_MIGRATE_CLEAR_FLAG_CCS_DATA BIT(1)
#define XE_MIGRATE_CLEAR_FLAG_FULL (XE_MIGRATE_CLEAR_FLAG_BO_DATA | \
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index 3fd462fda625..7357458bc0d2 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -36,46 +36,51 @@ static void tiles_fini(void *arg)
/*
* On multi-tile devices, partition the BAR space for MMIO on each tile,
* possibly accounting for register override on the number of tiles available.
+ * tile_mmio_size covers both the tile's 4MB register space and the
+ * additional space for the GTT and other (possibly unused) regions.
* Resulting memory layout is like below:
*
* .----------------------. <- tile_count * tile_mmio_size
* | .... |
* |----------------------| <- 2 * tile_mmio_size
+ * | tile1 GTT + other |
+ * |----------------------| <- 1 * tile_mmio_size + 4MB
* | tile1->mmio.regs |
* |----------------------| <- 1 * tile_mmio_size
+ * | tile0 GTT + other |
+ * |----------------------| <- 4MB
* | tile0->mmio.regs |
* '----------------------' <- 0MB
*/
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
- void __iomem *regs;
u8 id;
/*
* Nothing to be done as tile 0 has already been setup earlier with the
- * entire BAR mapped - see xe_mmio_init()
+ * entire BAR mapped - see xe_mmio_probe_early()
*/
if (xe->info.tile_count == 1)
return;
/* Possibly override number of tile based on configuration register */
if (!xe->info.skip_mtcfg) {
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
u8 tile_count;
u32 mtcfg;
/*
* Although the per-tile mmio regs are not yet initialized, this
- * is fine as it's going to the root gt, that's guaranteed to be
- * initialized earlier in xe_mmio_init()
+ * is fine as it's going to the root tile's mmio, that's
+ * guaranteed to be initialized earlier in xe_mmio_probe_early()
*/
- mtcfg = xe_mmio_read64_2x32(gt, XEHP_MTCFG_ADDR);
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
if (tile_count < xe->info.tile_count) {
drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
+ xe->info.tile_count, tile_count);
xe->info.tile_count = tile_count;
/*
@@ -88,57 +93,15 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
}
}
- regs = xe->mmio.regs;
- for_each_tile(tile, xe, id) {
- tile->mmio.size = tile_mmio_size;
- tile->mmio.regs = regs;
- regs += tile_mmio_size;
- }
-}
-
-/*
- * On top of all the multi-tile MMIO space there can be a platform-dependent
- * extension for each tile, resulting in a layout like below:
- *
- * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size
- * | .... |
- * |----------------------| <- ext_base + 2 * tile_mmio_ext_size
- * | tile1->mmio_ext.regs |
- * |----------------------| <- ext_base + 1 * tile_mmio_ext_size
- * | tile0->mmio_ext.regs |
- * |======================| <- ext_base = tile_count * tile_mmio_size
- * | |
- * | mmio.regs |
- * | |
- * '----------------------' <- 0MB
- *
- * Set up the tile[]->mmio_ext pointers/sizes.
- */
-static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
- size_t tile_mmio_ext_size)
-{
- struct xe_tile *tile;
- void __iomem *regs;
- u8 id;
-
- if (!xe->info.has_mmio_ext)
- return;
-
- regs = xe->mmio.regs + tile_mmio_size * xe->info.tile_count;
- for_each_tile(tile, xe, id) {
- tile->mmio_ext.size = tile_mmio_ext_size;
- tile->mmio_ext.regs = regs;
- regs += tile_mmio_ext_size;
- }
+ for_each_remote_tile(tile, xe, id)
+ xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}
int xe_mmio_probe_tiles(struct xe_device *xe)
{
size_t tile_mmio_size = SZ_16M;
- size_t tile_mmio_ext_size = xe->info.tile_mmio_ext_size;
mmio_multi_tile_setup(xe, tile_mmio_size);
- mmio_extension_setup(xe, tile_mmio_size, tile_mmio_ext_size);
return devm_add_action_or_reset(xe->drm.dev, tiles_fini, xe);
}
@@ -153,141 +116,155 @@ static void mmio_fini(void *arg)
root_tile->mmio.regs = NULL;
}
-int xe_mmio_init(struct xe_device *xe)
+int xe_mmio_probe_early(struct xe_device *xe)
{
struct xe_tile *root_tile = xe_device_get_root_tile(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- const int mmio_bar = 0;
/*
* Map the entire BAR.
* The first 16MB of the BAR, belong to the root tile, and include:
* registers (0-4MB), reserved space (4MB-8MB) and GGTT (8MB-16MB).
*/
- xe->mmio.size = pci_resource_len(pdev, mmio_bar);
- xe->mmio.regs = pci_iomap(pdev, mmio_bar, GTTMMADR_BAR);
- if (xe->mmio.regs == NULL) {
+ xe->mmio.size = pci_resource_len(pdev, GTTMMADR_BAR);
+ xe->mmio.regs = pci_iomap(pdev, GTTMMADR_BAR, 0);
+ if (!xe->mmio.regs) {
drm_err(&xe->drm, "failed to map registers\n");
return -EIO;
}
/* Setup first tile; other tiles (if present) will be setup later. */
- root_tile->mmio.size = SZ_16M;
- root_tile->mmio.regs = xe->mmio.regs;
+ xe_mmio_init(&root_tile->mmio, root_tile, xe->mmio.regs, SZ_4M);
return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
}
+ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */
+
+/**
+ * xe_mmio_init() - Initialize an MMIO instance
+ * @mmio: Pointer to the MMIO instance to initialize
+ * @tile: The tile to which the MMIO region belongs
+ * @ptr: Pointer to the start of the MMIO region
+ * @size: The size of the MMIO region in bytes
+ *
+ * This is a convenience function for minimal initialization of struct xe_mmio.
+ */
+void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size)
+{
+ xe_tile_assert(tile, size <= XE_REG_ADDR_MAX);
+
+ mmio->regs = ptr;
+ mmio->regs_size = size;
+ mmio->tile = tile;
+}
-static void mmio_flush_pending_writes(struct xe_gt *gt)
+static void mmio_flush_pending_writes(struct xe_mmio *mmio)
{
#define DUMMY_REG_OFFSET 0x130030
- struct xe_tile *tile = gt_to_tile(gt);
int i;
- if (tile->xe->info.platform != XE_LUNARLAKE)
+ if (mmio->tile->xe->info.platform != XE_LUNARLAKE)
return;
/* 4 dummy writes */
for (i = 0; i < 4; i++)
- writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
+ writel(0, mmio->regs + DUMMY_REG_OFFSET);
}
-u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u8 val;
/* Wa_15015404425 */
- mmio_flush_pending_writes(gt);
+ mmio_flush_pending_writes(mmio);
- val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
- trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+ val = readb(mmio->regs + addr);
+ trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
return val;
}
-u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u16 val;
/* Wa_15015404425 */
- mmio_flush_pending_writes(gt);
+ mmio_flush_pending_writes(mmio);
- val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
- trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+ val = readw(mmio->regs + addr);
+ trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
return val;
}
-void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
+void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
- trace_xe_reg_rw(gt, true, addr, val, sizeof(val));
+ trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));
- if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
- xe_gt_sriov_vf_write32(gt, reg, val);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg, val);
else
- writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+ writel(val, mmio->regs + addr);
}
-u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
+u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg)
{
- struct xe_tile *tile = gt_to_tile(gt);
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
u32 val;
/* Wa_15015404425 */
- mmio_flush_pending_writes(gt);
+ mmio_flush_pending_writes(mmio);
- if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
- val = xe_gt_sriov_vf_read32(gt, reg);
+ if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe))
+ val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?:
+ mmio->tile->primary_gt, reg);
else
- val = readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
+ val = readl(mmio->regs + addr);
- trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
+ trace_xe_reg_rw(mmio, false, addr, val, sizeof(val));
return val;
}
-u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
+u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set)
{
u32 old, reg_val;
- old = xe_mmio_read32(gt, reg);
+ old = xe_mmio_read32(mmio, reg);
reg_val = (old & ~clr) | set;
- xe_mmio_write32(gt, reg, reg_val);
+ xe_mmio_write32(mmio, reg, reg_val);
return old;
}
-int xe_mmio_write32_and_verify(struct xe_gt *gt,
+int xe_mmio_write32_and_verify(struct xe_mmio *mmio,
struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
u32 reg_val;
- xe_mmio_write32(gt, reg, val);
- reg_val = xe_mmio_read32(gt, reg);
+ xe_mmio_write32(mmio, reg, val);
+ reg_val = xe_mmio_read32(mmio, reg);
return (reg_val & mask) != eval ? -EINVAL : 0;
}
-bool xe_mmio_in_range(const struct xe_gt *gt,
+bool xe_mmio_in_range(const struct xe_mmio *mmio,
const struct xe_mmio_range *range,
struct xe_reg reg)
{
- u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
+ u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
return range && addr >= range->start && addr <= range->end;
}
/**
* xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
- * @gt: MMIO target GT
+ * @mmio: MMIO target
* @reg: register to read value from
*
* Although Intel GPUs have some 64-bit registers, the hardware officially
@@ -307,21 +284,21 @@ bool xe_mmio_in_range(const struct xe_gt *gt,
*
* Returns the value of the 64-bit register.
*/
-u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
+u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg)
{
struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
u32 ldw, udw, oldudw, retries;
- reg.addr = xe_mmio_adjusted_addr(gt, reg.addr);
- reg_udw.addr = xe_mmio_adjusted_addr(gt, reg_udw.addr);
+ reg.addr = xe_mmio_adjusted_addr(mmio, reg.addr);
+ reg_udw.addr = xe_mmio_adjusted_addr(mmio, reg_udw.addr);
/* we shouldn't adjust just one register address */
- xe_gt_assert(gt, reg_udw.addr == reg.addr + 0x4);
+ xe_tile_assert(mmio->tile, reg_udw.addr == reg.addr + 0x4);
- oldudw = xe_mmio_read32(gt, reg_udw);
+ oldudw = xe_mmio_read32(mmio, reg_udw);
for (retries = 5; retries; --retries) {
- ldw = xe_mmio_read32(gt, reg);
- udw = xe_mmio_read32(gt, reg_udw);
+ ldw = xe_mmio_read32(mmio, reg);
+ udw = xe_mmio_read32(mmio, reg_udw);
if (udw == oldudw)
break;
@@ -329,14 +306,14 @@ u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
oldudw = udw;
}
- xe_gt_WARN(gt, retries == 0,
- "64-bit read of %#x did not stabilize\n", reg.addr);
+ drm_WARN(&mmio->tile->xe->drm, retries == 0,
+ "64-bit read of %#x did not stabilize\n", reg.addr);
return (u64)udw << 32 | ldw;
}
-static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic, bool expect_match)
+static int __xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us);
@@ -346,7 +323,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
bool check;
for (;;) {
- read = xe_mmio_read32(gt, reg);
+ read = xe_mmio_read32(mmio, reg);
check = (read & mask) == val;
if (!expect_match)
@@ -372,7 +349,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
}
if (ret != 0) {
- read = xe_mmio_read32(gt, reg);
+ read = xe_mmio_read32(mmio, reg);
check = (read & mask) == val;
if (!expect_match)
@@ -390,7 +367,7 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
/**
* xe_mmio_wait32() - Wait for a register to match the desired masked value
- * @gt: MMIO target GT
+ * @mmio: MMIO target
* @reg: register to read value from
* @mask: mask to be applied to the value read from the register
* @val: desired value after applying the mask
@@ -407,15 +384,15 @@ static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 v
* @timeout_us for different reasons, specially in non-atomic contexts. Thus,
* it is possible that this function succeeds even after @timeout_us has passed.
*/
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic)
{
- return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, true);
+ return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, true);
}
/**
* xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
- * @gt: MMIO target GT
+ * @mmio: MMIO target
* @reg: register to read value from
* @mask: mask to be applied to the value read from the register
* @val: value not to be matched after applying the mask
@@ -426,8 +403,8 @@ int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 t
* This function works exactly like xe_mmio_wait32() with the exception that
* @val is expected not to be matched.
*/
-int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
+int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
u32 *out_val, bool atomic)
{
- return __xe_mmio_wait32(gt, reg, mask, val, timeout_us, out_val, atomic, false);
+ return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 26551410ecc8..c151ba569003 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -11,28 +11,35 @@
struct xe_device;
struct xe_reg;
-int xe_mmio_init(struct xe_device *xe);
+int xe_mmio_probe_early(struct xe_device *xe);
int xe_mmio_probe_tiles(struct xe_device *xe);
-u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg);
-u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg);
-void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
-u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg);
-u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set);
-int xe_mmio_write32_and_verify(struct xe_gt *gt, struct xe_reg reg, u32 val, u32 mask, u32 eval);
-bool xe_mmio_in_range(const struct xe_gt *gt, const struct xe_mmio_range *range, struct xe_reg reg);
-
-u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
-int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic);
-int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
- u32 *out_val, bool atomic);
-
-static inline u32 xe_mmio_adjusted_addr(const struct xe_gt *gt, u32 addr)
+void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size);
+
+u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
+void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
+u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
+u32 xe_mmio_rmw32(struct xe_mmio *mmio, struct xe_reg reg, u32 clr, u32 set);
+int xe_mmio_write32_and_verify(struct xe_mmio *mmio, struct xe_reg reg, u32 val, u32 mask, u32 eval);
+bool xe_mmio_in_range(const struct xe_mmio *mmio, const struct xe_mmio_range *range, struct xe_reg reg);
+
+u64 xe_mmio_read64_2x32(struct xe_mmio *mmio, struct xe_reg reg);
+int xe_mmio_wait32(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 val,
+ u32 timeout_us, u32 *out_val, bool atomic);
+int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask,
+ u32 val, u32 timeout_us, u32 *out_val, bool atomic);
+
+static inline u32 xe_mmio_adjusted_addr(const struct xe_mmio *mmio, u32 addr)
{
- if (addr < gt->mmio.adj_limit)
- addr += gt->mmio.adj_offset;
+ if (addr < mmio->adj_limit)
+ addr += mmio->adj_offset;
return addr;
}
+static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
+{
+ return &xe->tiles[0].mmio;
+}
+
#endif
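
For reference, the BAR partitioning described in the mmio_multi_tile_setup() comment above can be illustrated with a small standalone userspace program. The tile count below is an assumed example value; the 16 MiB per-tile slice and 4 MiB register window are taken from that comment, and the program is an illustration rather than driver code.

#include <stdio.h>
#include <stddef.h>

#define TILE_MMIO_SIZE	(16u << 20)	/* SZ_16M slice per tile */
#define TILE_REGS_SIZE	(4u << 20)	/* SZ_4M of registers per tile */

int main(void)
{
	unsigned int tile_count = 2;	/* assumed for the example */
	unsigned int id;

	for (id = 0; id < tile_count; id++) {
		size_t regs_start = (size_t)id * TILE_MMIO_SIZE;

		printf("tile%u regs: [0x%zx, 0x%zx), GTT+other up to 0x%zx\n",
		       id, regs_start, regs_start + TILE_REGS_SIZE,
		       (size_t)(id + 1) * TILE_MMIO_SIZE);
	}
	return 0;
}
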
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 7ff0ac5b799a..0c737413fcb6 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -278,7 +278,7 @@ static void xelp_lncf_dump(struct xe_mocs_info *info, struct xe_gt *gt, struct d
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i));
drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n",
j++,
@@ -310,7 +310,7 @@ static void xelp_mocs_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u, %u, %u, %u, %u, %u, %u, %u ] (%#8x)\n",
i,
@@ -383,7 +383,7 @@ static void xehp_lncf_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i));
drm_printf(p, "LNCFCMOCS[%2d] = [%u, %u, %u] (%#8x)\n",
j++,
@@ -428,7 +428,7 @@ static void pvc_mocs_dump(struct xe_mocs_info *info, unsigned int flags, struct
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_LNCFCMOCS(i));
drm_printf(p, "LNCFCMOCS[%2d] = [ %u ] (%#8x)\n",
j++,
@@ -510,7 +510,7 @@ static void mtl_mocs_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u] (%#8x)\n",
i,
@@ -553,7 +553,7 @@ static void xe2_mocs_dump(struct xe_mocs_info *info, unsigned int flags,
if (regs_are_mcr(gt))
reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i));
else
- reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i));
+ reg_val = xe_mmio_read32(&gt->mmio, XELP_GLOBAL_MOCS(i));
drm_printf(p, "GLOB_MOCS[%2d] = [%u, %u, %u] (%#8x)\n",
i,
@@ -576,6 +576,7 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
memset(info, 0, sizeof(struct xe_mocs_info));
switch (xe->info.platform) {
+ case XE_PANTHERLAKE:
case XE_LUNARLAKE:
case XE_BATTLEMAGE:
info->ops = &xe2_mocs_ops;
@@ -690,7 +691,7 @@ static void __init_mocs_table(struct xe_gt *gt,
if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
else
- xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
+ xe_mmio_write32(&gt->mmio, XELP_GLOBAL_MOCS(i), mocs);
}
}
@@ -730,7 +731,7 @@ static void init_l3cc_table(struct xe_gt *gt,
if (regs_are_mcr(gt))
xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
else
- xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
+ xe_mmio_write32(&gt->mmio, XELP_LNCFCMOCS(i), l3cc);
}
}
@@ -773,25 +774,24 @@ void xe_mocs_init(struct xe_gt *gt)
void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_mocs_info table;
- unsigned int flags;
- u32 ret;
struct xe_device *xe = gt_to_xe(gt);
+ enum xe_force_wake_domains domain;
+ struct xe_mocs_info table;
+ unsigned int fw_ref, flags;
flags = get_mocs_settings(xe, &table);
+ domain = flags & HAS_LNCF_MOCS ? XE_FORCEWAKE_ALL : XE_FW_GT;
xe_pm_runtime_get_noresume(xe);
- ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), domain);
- if (ret)
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain))
goto err_fw;
table.ops->dump(&table, flags, gt, p);
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-
err_fw:
- xe_assert(xe, !ret);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
}
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index bfc3deebdaa2..e4742e27e2cd 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -11,6 +11,7 @@
#include <drm/drm_module.h>
#include "xe_drv.h"
+#include "xe_configfs.h"
#include "xe_hw_fence.h"
#include "xe_pci.h"
#include "xe_pm.h"
@@ -19,20 +20,24 @@
struct xe_modparam xe_modparam = {
.probe_display = true,
- .guc_log_level = 5,
+ .guc_log_level = 3,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
+ .svm_notifier_size = 512,
/* the rest are 0 by default */
};
+module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600);
+MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size (in MiB), must be power of 2");
+
module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444);
MODULE_PARM_DESC(force_execlist, "Force Execlist submission");
module_param_named(probe_display, xe_modparam.probe_display, bool, 0444);
MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)");
-module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, uint, 0600);
-MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
+module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
+MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
@@ -77,15 +82,15 @@ struct init_funcs {
void (*exit)(void);
};
-static void xe_dummy_exit(void)
-{
-}
-
static const struct init_funcs init_funcs[] = {
{
.init = xe_check_nomodeset,
},
{
+ .init = xe_configfs_init,
+ .exit = xe_configfs_exit,
+ },
+ {
.init = xe_hw_fence_module_init,
.exit = xe_hw_fence_module_exit,
},
@@ -103,7 +108,6 @@ static const struct init_funcs init_funcs[] = {
},
{
.init = xe_pm_module_init,
- .exit = xe_dummy_exit,
},
};
diff --git a/drivers/gpu/drm/xe/xe_module.h b/drivers/gpu/drm/xe/xe_module.h
index 161a5e6f717f..5a3bfea8b7b4 100644
--- a/drivers/gpu/drm/xe/xe_module.h
+++ b/drivers/gpu/drm/xe/xe_module.h
@@ -22,6 +22,7 @@ struct xe_modparam {
unsigned int max_vfs;
#endif
int wedged_mode;
+ u32 svm_notifier_size;
};
extern struct xe_modparam xe_modparam;
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 2804f14f8f29..fb842fa0552e 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -12,11 +12,12 @@
#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>
+#include <generated/xe_wa_oob.h>
+
#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
-#include "regs/xe_lrc_layout.h"
#include "regs/xe_oa_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
@@ -28,7 +29,6 @@
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc_pc.h"
-#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
@@ -36,11 +36,23 @@
#include "xe_pm.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
+#include "xe_sync.h"
+#include "xe_wa.h"
#define DEFAULT_POLL_FREQUENCY_HZ 200
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX
+enum xe_oa_submit_deps {
+ XE_OA_SUBMIT_NO_DEPS,
+ XE_OA_SUBMIT_ADD_DEPS,
+};
+
+enum xe_oa_user_extn_from {
+ XE_OA_USER_EXTN_FROM_OPEN,
+ XE_OA_USER_EXTN_FROM_CONFIG,
+};
+
struct xe_oa_reg {
struct xe_reg addr;
u32 value;
@@ -63,13 +75,8 @@ struct xe_oa_config {
struct rcu_head rcu;
};
-struct flex {
- struct xe_reg reg;
- u32 offset;
- u32 value;
-};
-
struct xe_oa_open_param {
+ struct xe_file *xef;
u32 oa_unit_id;
bool sample;
u32 metric_set;
@@ -81,6 +88,11 @@ struct xe_oa_open_param {
struct xe_exec_queue *exec_q;
struct xe_hw_engine *hwe;
bool no_preempt;
+ struct drm_xe_sync __user *syncs_user;
+ int num_syncs;
+ struct xe_sync_entry *syncs;
+ size_t oa_buffer_size;
+ int wait_num_reports;
};
struct xe_oa_config_bo {
@@ -90,6 +102,17 @@ struct xe_oa_config_bo {
struct xe_bb *bb;
};
+struct xe_oa_fence {
+ /* @base: dma fence base */
+ struct dma_fence base;
+ /* @lock: lock for the fence */
+ spinlock_t lock;
+ /* @work: work to signal @base */
+ struct delayed_work work;
+ /* @cb: callback to schedule @work */
+ struct dma_fence_cb cb;
+};
+
#define DRM_FMT(x) DRM_XE_OA_FMT_TYPE_##x
static const struct xe_oa_format oa_formats[] = {
@@ -162,10 +185,10 @@ static struct xe_oa_config *xe_oa_get_oa_config(struct xe_oa *oa, int metrics_se
return oa_config;
}
-static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo)
+static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *last_fence)
{
xe_oa_config_put(oa_bo->oa_config);
- xe_bb_free(oa_bo->bb, NULL);
+ xe_bb_free(oa_bo->bb, last_fence);
kfree(oa_bo);
}
@@ -176,7 +199,7 @@ static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream)
static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
{
- return xe_mmio_read32(stream->gt, __oa_regs(stream)->oa_tail_ptr) &
+ return xe_mmio_read32(&stream->gt->mmio, __oa_regs(stream)->oa_tail_ptr) &
OAG_OATAILPTR_MASK;
}
@@ -214,11 +237,9 @@ static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report)
static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
+ u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
- u32 tail, hw_tail;
unsigned long flags;
- bool pollin;
- u32 partial_report_size;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -262,12 +283,12 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;
- pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail,
- stream->oa_buffer.head) >= report_size;
+ available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
+ stream->pollin = available >= stream->wait_num_reports * report_size;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return pollin;
+ return stream->pollin;
}
static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
@@ -275,10 +296,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
struct xe_oa_stream *stream =
container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (xe_oa_buffer_check_unlocked(stream)) {
- stream->pollin = true;
+ if (xe_oa_buffer_check_unlocked(stream))
wake_up(&stream->poll_wq);
- }
hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));
@@ -366,7 +385,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
struct xe_reg oaheadptr = __oa_regs(stream)->oa_head_ptr;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- xe_mmio_write32(stream->gt, oaheadptr,
+ xe_mmio_write32(&stream->gt->mmio, oaheadptr,
(head + gtt_offset) & OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = head;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
@@ -378,21 +397,30 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf,
static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream)
{
u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo);
- u32 oa_buf = gtt_offset | OABUFFER_SIZE_16M | OAG_OABUFFER_MEMORY_SELECT;
+ int size_exponent = __ffs(stream->oa_buffer.bo->size);
+ u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT;
+ struct xe_mmio *mmio = &stream->gt->mmio;
unsigned long flags;
+ /*
+ * If oa buffer size is more than 16MB (exponent greater than 24), the
+ * oa buffer size field is multiplied by 8 in xe_oa_enable_metric_set.
+ */
+ oa_buf |= REG_FIELD_PREP(OABUFFER_SIZE_MASK,
+ size_exponent > 24 ? size_exponent - 20 : size_exponent - 17);
+
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_status, 0);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_head_ptr,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_status, 0);
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_head_ptr,
gtt_offset & OAG_OAHEADPTR_MASK);
stream->oa_buffer.head = 0;
/*
* PRM says: "This MMIO must be set before the OATAILPTR register and after the
* OAHEADPTR register. This is to enable proper functionality of the overflow bit".
*/
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_buffer, oa_buf);
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_tail_ptr,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_buffer, oa_buf);
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_tail_ptr,
gtt_offset & OAG_OATAILPTR_MASK);
/* Mark that we need updated tail pointer to read from */
@@ -424,6 +452,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
return val;
}
+static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
+{
+ return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
+}
+
static void xe_oa_enable(struct xe_oa_stream *stream)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
@@ -444,21 +478,23 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
- xe_mmio_write32(stream->gt, regs->oa_ctrl, val);
+ xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
}
static void xe_oa_disable(struct xe_oa_stream *stream)
{
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, 0);
- if (xe_mmio_wait32(stream->gt, __oa_regs(stream)->oa_ctrl,
+ struct xe_mmio *mmio = &stream->gt->mmio;
+
+ xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
+ if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
"wait for OA to be disabled timed out\n");
if (GRAPHICS_VERx100(stream->oa->xe) <= 1270 && GRAPHICS_VERx100(stream->oa->xe) != 1260) {
/* <= XE_METEORLAKE except XE_PVC */
- xe_mmio_write32(stream->gt, OA_TLB_INV_CR, 1);
- if (xe_mmio_wait32(stream->gt, OA_TLB_INV_CR, 1, 0, 50000, NULL, false))
+ xe_mmio_write32(mmio, OA_TLB_INV_CR, 1);
+ if (xe_mmio_wait32(mmio, OA_TLB_INV_CR, 1, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
"wait for OA tlb invalidate timed out\n");
}
@@ -481,7 +517,7 @@ static int __xe_oa_read(struct xe_oa_stream *stream, char __user *buf,
size_t count, size_t *offset)
{
/* Only clear our bits to avoid side-effects */
- stream->oa_status = xe_mmio_rmw32(stream->gt, __oa_regs(stream)->oa_status,
+ stream->oa_status = xe_mmio_rmw32(&stream->gt->mmio, __oa_regs(stream)->oa_status,
OASTATUS_RELEVANT_BITS, 0);
/*
* Signal to userspace that there is non-zero OA status to read via
@@ -515,6 +551,7 @@ static ssize_t xe_oa_read(struct file *file, char __user *buf,
mutex_unlock(&stream->stream_lock);
} while (!offset && !ret);
} else {
+ xe_oa_buffer_check_unlocked(stream);
mutex_lock(&stream->stream_lock);
ret = __xe_oa_read(stream, buf, count, &offset);
mutex_unlock(&stream->stream_lock);
@@ -567,32 +604,60 @@ static __poll_t xe_oa_poll(struct file *file, poll_table *wait)
return ret;
}
-static int xe_oa_submit_bb(struct xe_oa_stream *stream, struct xe_bb *bb)
+static void xe_oa_lock_vma(struct xe_exec_queue *q)
{
+ if (q->vm) {
+ down_read(&q->vm->lock);
+ xe_vm_lock(q->vm, false);
+ }
+}
+
+static void xe_oa_unlock_vma(struct xe_exec_queue *q)
+{
+ if (q->vm) {
+ xe_vm_unlock(q->vm);
+ up_read(&q->vm->lock);
+ }
+}
+
+static struct dma_fence *xe_oa_submit_bb(struct xe_oa_stream *stream, enum xe_oa_submit_deps deps,
+ struct xe_bb *bb)
+{
+ struct xe_exec_queue *q = stream->exec_q ?: stream->k_exec_q;
struct xe_sched_job *job;
struct dma_fence *fence;
- long timeout;
int err = 0;
- /* Kernel configuration is issued on stream->k_exec_q, not stream->exec_q */
- job = xe_bb_create_job(stream->k_exec_q, bb);
+ xe_oa_lock_vma(q);
+
+ job = xe_bb_create_job(q, bb);
if (IS_ERR(job)) {
err = PTR_ERR(job);
goto exit;
}
+ job->ggtt = true;
+
+ if (deps == XE_OA_SUBMIT_ADD_DEPS) {
+ for (int i = 0; i < stream->num_syncs && !err; i++)
+ err = xe_sync_entry_add_deps(&stream->syncs[i], job);
+ if (err) {
+ drm_dbg(&stream->oa->xe->drm, "xe_sync_entry_add_deps err %d\n", err);
+ goto err_put_job;
+ }
+ }
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);
- timeout = dma_fence_wait_timeout(fence, false, HZ);
- dma_fence_put(fence);
- if (timeout < 0)
- err = timeout;
- else if (!timeout)
- err = -ETIME;
+ xe_oa_unlock_vma(q);
+
+ return fence;
+err_put_job:
+ xe_sched_job_put(job);
exit:
- return err;
+ xe_oa_unlock_vma(q);
+ return ERR_PTR(err);
}
static void write_cs_mi_lri(struct xe_bb *bb, const struct xe_oa_reg *reg_data, u32 n_regs)
@@ -636,57 +701,34 @@ static void xe_oa_free_configs(struct xe_oa_stream *stream)
xe_oa_config_put(stream->oa_config);
llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node)
- free_oa_config_bo(oa_bo);
-}
-
-static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- struct xe_bb *bb, const struct flex *flex, u32 count)
-{
- u32 offset = xe_bo_ggtt_addr(lrc->bo);
-
- do {
- bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1);
- bb->cs[bb->len++] = offset + flex->offset * sizeof(u32);
- bb->cs[bb->len++] = 0;
- bb->cs[bb->len++] = flex->value;
-
- } while (flex++, --count);
+ free_oa_config_bo(oa_bo, stream->last_fence);
+ dma_fence_put(stream->last_fence);
}
-static int xe_oa_modify_ctx_image(struct xe_oa_stream *stream, struct xe_lrc *lrc,
- const struct flex *flex, u32 count)
+static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri, u32 count)
{
+ struct dma_fence *fence;
struct xe_bb *bb;
int err;
- bb = xe_bb_new(stream->gt, 4 * count, false);
+ bb = xe_bb_new(stream->gt, 2 * count + 1, false);
if (IS_ERR(bb)) {
err = PTR_ERR(bb);
goto exit;
}
- xe_oa_store_flex(stream, lrc, bb, flex, count);
-
- err = xe_oa_submit_bb(stream, bb);
- xe_bb_free(bb, NULL);
-exit:
- return err;
-}
-
-static int xe_oa_load_with_lri(struct xe_oa_stream *stream, struct xe_oa_reg *reg_lri)
-{
- struct xe_bb *bb;
- int err;
+ write_cs_mi_lri(bb, reg_lri, count);
- bb = xe_bb_new(stream->gt, 3, false);
- if (IS_ERR(bb)) {
- err = PTR_ERR(bb);
- goto exit;
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_NO_DEPS, bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto free_bb;
}
+ xe_bb_free(bb, fence);
+ dma_fence_put(fence);
- write_cs_mi_lri(bb, reg_lri, 1);
-
- err = xe_oa_submit_bb(stream, bb);
+ return 0;
+free_bb:
xe_bb_free(bb, NULL);
exit:
return err;
@@ -695,70 +737,55 @@ exit:
static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAR_OACONTROL,
+ oacontrol,
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE),
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
},
};
- struct xe_oa_reg reg_lri = { OAR_OACONTROL, oacontrol };
- int err;
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
-
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 regs_offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
u32 oacontrol = __format_to_oactrl(format, OAR_OACONTROL_COUNTER_SEL_MASK) |
(enable ? OAR_OACONTROL_COUNTER_ENABLE : 0);
- struct flex regs_context[] = {
+ struct xe_oa_reg reg_lri[] = {
{
OACTXCONTROL(stream->hwe->mmio_base),
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] + 1,
enable ? OA_COUNTER_RESUME : 0,
},
{
+ OAC_OACONTROL,
+ oacontrol
+ },
+ {
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- regs_offset + CTX_CONTEXT_CONTROL,
- _MASKED_BIT_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) |
+ _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
+ enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
_MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
},
};
- struct xe_oa_reg reg_lri = { OAC_OACONTROL, oacontrol };
- int err;
/* Set ccs select to enable programming of OAC_OACONTROL */
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctrl, __oa_ccs_select(stream));
+ xe_mmio_write32(&stream->gt->mmio, __oa_regs(stream)->oa_ctrl,
+ __oa_ccs_select(stream));
- /* Modify stream hwe context image with regs_context */
- err = xe_oa_modify_ctx_image(stream, stream->exec_q->lrc[0],
- regs_context, ARRAY_SIZE(regs_context));
- if (err)
- return err;
-
- /* Apply reg_lri using LRI */
- return xe_oa_load_with_lri(stream, &reg_lri);
+ return xe_oa_load_with_lri(stream, reg_lri, ARRAY_SIZE(reg_lri));
}
static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
@@ -785,20 +812,18 @@ static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool en
static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
{
+ struct xe_mmio *mmio = &stream->gt->mmio;
u32 sqcnt1;
- /*
- * Wa_1508761755:xehpsdv, dg2
- * Enable thread stall DOP gating and EU DOP gating.
- */
- if (stream->oa->xe->info.platform == XE_DG2) {
+ /* Enable thread stall DOP gating and EU DOP gating. */
+ if (XE_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
_MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
}
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
oag_configure_mmio_trigger(stream, false));
/* disable the context save/restore or OAR counters */
@@ -806,13 +831,13 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
xe_oa_configure_oa_context(stream, false);
/* Make sure we disable noa to save power. */
- xe_mmio_rmw32(stream->gt, RPM_CONFIG1, GT_NOA_ENABLE, 0);
+ xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0);
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
/* Reset PMON Enable to save power. */
- xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, sqcnt1, 0);
+ xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0);
}
static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
@@ -832,7 +857,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_oa_free_oa_buffer(stream);
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
xe_pm_runtime_put(stream->oa->xe);
/* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */
@@ -840,17 +865,15 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
xe_oa_free_configs(stream);
+ xe_file_put(stream->xef);
}
-static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream)
+static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
{
struct xe_bo *bo;
- BUILD_BUG_ON_NOT_POWER_OF_2(XE_OA_BUFFER_SIZE);
- BUILD_BUG_ON(XE_OA_BUFFER_SIZE < SZ_128K || XE_OA_BUFFER_SIZE > SZ_16M);
-
bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
- XE_OA_BUFFER_SIZE, ttm_bo_type_kernel,
+ size, ttm_bo_type_kernel,
XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -910,11 +933,62 @@ out:
return oa_bo;
}
+static void xe_oa_update_last_fence(struct xe_oa_stream *stream, struct dma_fence *fence)
+{
+ dma_fence_put(stream->last_fence);
+ stream->last_fence = dma_fence_get(fence);
+}
+
+static void xe_oa_fence_work_fn(struct work_struct *w)
+{
+ struct xe_oa_fence *ofence = container_of(w, typeof(*ofence), work.work);
+
+ /* Signal fence to indicate new OA configuration is active */
+ dma_fence_signal(&ofence->base);
+ dma_fence_put(&ofence->base);
+}
+
+static void xe_oa_config_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ /* Additional empirical delay needed for NOA programming after registers are written */
+#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
+
+ struct xe_oa_fence *ofence = container_of(cb, typeof(*ofence), cb);
+
+ INIT_DELAYED_WORK(&ofence->work, xe_oa_fence_work_fn);
+ queue_delayed_work(system_unbound_wq, &ofence->work,
+ usecs_to_jiffies(NOA_PROGRAM_ADDITIONAL_DELAY_US));
+ dma_fence_put(fence);
+}
+
+static const char *xe_oa_get_driver_name(struct dma_fence *fence)
+{
+ return "xe_oa";
+}
+
+static const char *xe_oa_get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static const struct dma_fence_ops xe_oa_fence_ops = {
+ .get_driver_name = xe_oa_get_driver_name,
+ .get_timeline_name = xe_oa_get_timeline_name,
+};
+
static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config *config)
{
#define NOA_PROGRAM_ADDITIONAL_DELAY_US 500
struct xe_oa_config_bo *oa_bo;
- int err, us = NOA_PROGRAM_ADDITIONAL_DELAY_US;
+ struct xe_oa_fence *ofence;
+ int i, err, num_signal = 0;
+ struct dma_fence *fence;
+
+ ofence = kzalloc(sizeof(*ofence), GFP_KERNEL);
+ if (!ofence) {
+ err = -ENOMEM;
+ goto exit;
+ }
oa_bo = xe_oa_alloc_config_buffer(stream, config);
if (IS_ERR(oa_bo)) {
@@ -922,11 +996,50 @@ static int xe_oa_emit_oa_config(struct xe_oa_stream *stream, struct xe_oa_config
goto exit;
}
- err = xe_oa_submit_bb(stream, oa_bo->bb);
+ /* Emit OA configuration batch */
+ fence = xe_oa_submit_bb(stream, XE_OA_SUBMIT_ADD_DEPS, oa_bo->bb);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto exit;
+ }
- /* Additional empirical delay needed for NOA programming after registers are written */
- usleep_range(us, 2 * us);
+ /* Point of no return: initialize and set fence to signal */
+ spin_lock_init(&ofence->lock);
+ dma_fence_init(&ofence->base, &xe_oa_fence_ops, &ofence->lock, 0, 0);
+
+ for (i = 0; i < stream->num_syncs; i++) {
+ if (stream->syncs[i].flags & DRM_XE_SYNC_FLAG_SIGNAL)
+ num_signal++;
+ xe_sync_entry_signal(&stream->syncs[i], &ofence->base);
+ }
+
+ /* Additional dma_fence_get in case we dma_fence_wait */
+ if (!num_signal)
+ dma_fence_get(&ofence->base);
+
+ /* Update last fence too before adding callback */
+ xe_oa_update_last_fence(stream, fence);
+
+ /* Add job fence callback to schedule work to signal ofence->base */
+ err = dma_fence_add_callback(fence, &ofence->cb, xe_oa_config_cb);
+ xe_gt_assert(stream->gt, !err || err == -ENOENT);
+ if (err == -ENOENT)
+ xe_oa_config_cb(fence, &ofence->cb);
+
+ /* If nothing needs to be signaled we wait synchronously */
+ if (!num_signal) {
+ dma_fence_wait(&ofence->base, false);
+ dma_fence_put(&ofence->base);
+ }
+
+ /* Done with syncs */
+ for (i = 0; i < stream->num_syncs; i++)
+ xe_sync_entry_cleanup(&stream->syncs[i]);
+ kfree(stream->syncs);
+
+ return 0;
exit:
+ kfree(ofence);
return err;
}
@@ -938,17 +1051,24 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
+static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
+{
+ return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
+ stream->oa_buffer.bo->size > SZ_16M ?
+ OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
+}
+
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
{
+ struct xe_mmio *mmio = &stream->gt->mmio;
u32 oa_debug, sqcnt1;
int ret;
/*
- * Wa_1508761755:xehpsdv, dg2
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (stream->oa->xe->info.platform == XE_DG2) {
+ if (XE_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -966,12 +1086,13 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
OAG_OA_DEBUG_DISABLE_START_TRG_2_COUNT_QUAL |
OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL;
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_debug,
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
_MASKED_BIT_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
+ oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
- xe_mmio_write32(stream->gt, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
+ xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctx_ctrl, stream->periodic ?
(OAG_OAGLBCTXCTRL_COUNTER_RESUME |
OAG_OAGLBCTXCTRL_TIMER_ENABLE |
REG_FIELD_PREP(OAG_OAGLBCTXCTRL_TIMER_PERIOD_MASK,
@@ -985,7 +1106,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
- xe_mmio_rmw32(stream->gt, XELPMP_SQCNT1, 0, sqcnt1);
+ xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1);
/* Configure OAR/OAC */
if (stream->exec_q) {
@@ -997,6 +1118,288 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
return xe_oa_emit_oa_config(stream, stream->oa_config);
}
+static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
+{
+ u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
+ u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
+ u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
+ u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
+ int idx;
+
+ for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
+ const struct xe_oa_format *f = &oa->oa_formats[idx];
+
+ if (counter_size == f->counter_size && bc_report == f->bc_report &&
+ type == f->type && counter_sel == f->counter_select) {
+ *name = idx;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
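/*
 * For reference, the value matched above is the same u64 that userspace
 * passes as DRM_XE_OA_PROPERTY_OA_FORMAT. A minimal packing sketch, using
 * only the DRM_XE_OA_FORMAT_MASK_* defines from the uapi header (the prep()
 * helper is illustrative, not an existing API):
 *
 *	static __u64 prep(__u64 mask, __u64 val)
 *	{
 *		return (val << __builtin_ctzll(mask)) & mask;
 *	}
 *
 *	__u64 fmt = prep(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt_type) |
 *		    prep(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, counter_sel) |
 *		    prep(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, counter_size) |
 *		    prep(DRM_XE_OA_FORMAT_MASK_BC_REPORT, bc_report);
 */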
+
+static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (value >= oa->oa_unit_ids) {
+ drm_dbg(&oa->xe->drm, "OA unit ID out of range %llu\n", value);
+ return -EINVAL;
+ }
+ param->oa_unit_id = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->sample = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->metric_set = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ int ret = decode_oa_format(oa, value, &param->oa_format);
+
+ if (ret) {
+ drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
+ return ret;
+ }
+ return 0;
+}
+
+static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+#define OA_EXPONENT_MAX 31
+
+ if (value > OA_EXPONENT_MAX) {
+ drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
+ return -EINVAL;
+ }
+ param->period_exponent = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->disabled = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->exec_queue_id = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->engine_instance = value;
+ return 0;
+}
+
+static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->no_preempt = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_num_syncs(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->num_syncs = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_syncs_user(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ param->syncs_user = u64_to_user_ptr(value);
+ return 0;
+}
+
+static int xe_oa_set_prop_oa_buffer_size(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!is_power_of_2(value) || value < SZ_128K || value > SZ_128M) {
+ drm_dbg(&oa->xe->drm, "OA buffer size invalid %llu\n", value);
+ return -EINVAL;
+ }
+ param->oa_buffer_size = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_wait_num_reports(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ if (!value) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports must be non-zero, got %llu\n", value);
+ return -EINVAL;
+ }
+ param->wait_num_reports = value;
+ return 0;
+}
+
+static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param)
+{
+ return -EINVAL;
+}
+
+typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
+ struct xe_oa_open_param *param);
+static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = {
+ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
+ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
+ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
+ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
+ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
+ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
+ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
+ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
+ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_oa_buffer_size,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_wait_num_reports,
+};
+
+static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = {
+ [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
+ [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs,
+ [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user,
+ [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_ret_inval,
+ [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_ret_inval,
+};
+
+static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from,
+ u64 extension, struct xe_oa_open_param *param)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_ext_set_property ext;
+ int err;
+ u32 idx;
+
+ err = copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return -EFAULT;
+
+ BUILD_BUG_ON(ARRAY_SIZE(xe_oa_set_property_funcs_open) !=
+ ARRAY_SIZE(xe_oa_set_property_funcs_config));
+
+ if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs_open)) ||
+ XE_IOCTL_DBG(oa->xe, ext.pad))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs_open));
+
+ if (from == XE_OA_USER_EXTN_FROM_CONFIG)
+ return xe_oa_set_property_funcs_config[idx](oa, ext.value, param);
+ else
+ return xe_oa_set_property_funcs_open[idx](oa, ext.value, param);
+}
+
+typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, enum xe_oa_user_extn_from from,
+ u64 extension, struct xe_oa_open_param *param);
+static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
+ [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
+};
+
+#define MAX_USER_EXTENSIONS 16
+static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from from, u64 extension,
+ int ext_number, struct xe_oa_open_param *param)
+{
+ u64 __user *address = u64_to_user_ptr(extension);
+ struct drm_xe_user_extension ext;
+ int err;
+ u32 idx;
+
+ if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
+ return -E2BIG;
+
+ err = copy_from_user(&ext, address, sizeof(ext));
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return -EFAULT;
+
+ if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
+ XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
+ return -EINVAL;
+
+ idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
+ err = xe_oa_user_extension_funcs[idx](oa, from, extension, param);
+ if (XE_IOCTL_DBG(oa->xe, err))
+ return err;
+
+ if (ext.next_extension)
+ return xe_oa_user_extensions(oa, from, ext.next_extension, ++ext_number, param);
+
+ return 0;
+}
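/*
 * The extension argument is a userspace-built chain of at most
 * MAX_USER_EXTENSIONS nodes linked via drm_xe_user_extension.next_extension.
 * A minimal sketch of chaining two properties for the open path (struct
 * layout per the xe uapi header; the metric set id and format value are
 * placeholders):
 *
 *	struct drm_xe_ext_set_property fmt = {
 *		.base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_OA_PROPERTY_OA_FORMAT,
 *		.value = fmt_value,
 *	};
 *	struct drm_xe_ext_set_property metric = {
 *		.base.next_extension = (__u64)(uintptr_t)&fmt,
 *		.base.name = DRM_XE_OA_EXTENSION_SET_PROPERTY,
 *		.property = DRM_XE_OA_PROPERTY_OA_METRIC_SET,
 *		.value = metric_set_id,
 *	};
 *
 * The address of the chain head (&metric here) is what gets passed in as
 * 'extension' above.
 */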
+
+static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+{
+ int ret, num_syncs, num_ufence = 0;
+
+ if (param->num_syncs && !param->syncs_user) {
+ drm_dbg(&oa->xe->drm, "num_syncs specified without sync array\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (param->num_syncs) {
+ param->syncs = kcalloc(param->num_syncs, sizeof(*param->syncs), GFP_KERNEL);
+ if (!param->syncs) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
+ ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
+ &param->syncs_user[num_syncs], 0);
+ if (ret)
+ goto err_syncs;
+
+ if (xe_sync_is_ufence(&param->syncs[num_syncs]))
+ num_ufence++;
+ }
+
+ if (XE_IOCTL_DBG(oa->xe, num_ufence > 1)) {
+ ret = -EINVAL;
+ goto err_syncs;
+ }
+
+ return 0;
+
+err_syncs:
+ while (num_syncs--)
+ xe_sync_entry_cleanup(&param->syncs[num_syncs]);
+ kfree(param->syncs);
+exit:
+ return ret;
+}
+
static void xe_oa_stream_enable(struct xe_oa_stream *stream)
{
stream->pollin = false;
@@ -1090,36 +1493,38 @@ static int xe_oa_disable_locked(struct xe_oa_stream *stream)
static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
{
- struct drm_xe_ext_set_property ext;
+ struct xe_oa_open_param param = {};
long ret = stream->oa_config->id;
struct xe_oa_config *config;
int err;
- err = __copy_from_user(&ext, u64_to_user_ptr(arg), sizeof(ext));
- if (XE_IOCTL_DBG(stream->oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(stream->oa->xe, ext.pad) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.base.name != DRM_XE_OA_EXTENSION_SET_PROPERTY) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.base.next_extension) ||
- XE_IOCTL_DBG(stream->oa->xe, ext.property != DRM_XE_OA_PROPERTY_OA_METRIC_SET))
- return -EINVAL;
+ err = xe_oa_user_extensions(stream->oa, XE_OA_USER_EXTN_FROM_CONFIG, arg, 0, &param);
+ if (err)
+ return err;
- config = xe_oa_get_oa_config(stream->oa, ext.value);
+ config = xe_oa_get_oa_config(stream->oa, param.metric_set);
if (!config)
return -ENODEV;
- if (config != stream->oa_config) {
- err = xe_oa_emit_oa_config(stream, config);
- if (!err)
- config = xchg(&stream->oa_config, config);
- else
- ret = err;
+ param.xef = stream->xef;
+ err = xe_oa_parse_syncs(stream->oa, &param);
+ if (err)
+ goto err_config_put;
+
+ stream->num_syncs = param.num_syncs;
+ stream->syncs = param.syncs;
+
+ err = xe_oa_emit_oa_config(stream, config);
+ if (!err) {
+ config = xchg(&stream->oa_config, config);
+ drm_dbg(&stream->oa->xe->drm, "changed to oa config uuid=%s\n",
+ stream->oa_config->uuid);
}
+err_config_put:
xe_oa_config_put(config);
- return ret;
+ return err ?: ret;
}
static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
@@ -1145,7 +1550,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg)
static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg)
{
- struct drm_xe_oa_stream_info info = { .oa_buf_size = XE_OA_BUFFER_SIZE, };
+ struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, };
void __user *uaddr = (void __user *)arg;
if (copy_to_user(uaddr, &info, sizeof(info)))
@@ -1206,9 +1611,11 @@ static int xe_oa_release(struct inode *inode, struct file *file)
struct xe_oa_stream *stream = file->private_data;
struct xe_gt *gt = stream->gt;
+ xe_pm_runtime_get(gt_to_xe(gt));
mutex_lock(&gt->oa.gt_lock);
xe_oa_destroy_locked(stream);
mutex_unlock(&gt->oa.gt_lock);
+ xe_pm_runtime_put(gt_to_xe(gt));
/* Release the reference the OA stream kept on the driver */
drm_dev_put(&gt_to_xe(gt)->drm);
@@ -1229,7 +1636,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma)
}
/* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */
- if (vma->vm_end - vma->vm_start != XE_OA_BUFFER_SIZE) {
+ if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) {
drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n");
return -EINVAL;
}
@@ -1267,86 +1674,12 @@ static const struct file_operations xe_oa_fops = {
.mmap = xe_oa_mmap,
};
-static bool engine_supports_mi_query(struct xe_hw_engine *hwe)
-{
- return hwe->class == XE_ENGINE_CLASS_RENDER ||
- hwe->class == XE_ENGINE_CLASS_COMPUTE;
-}
-
-static bool xe_oa_find_reg_in_lri(u32 *state, u32 reg, u32 *offset, u32 end)
-{
- u32 idx = *offset;
- u32 len = min(MI_LRI_LEN(state[idx]) + idx, end);
- bool found = false;
-
- idx++;
- for (; idx < len; idx += 2) {
- if (state[idx] == reg) {
- found = true;
- break;
- }
- }
-
- *offset = idx;
- return found;
-}
-
-#define IS_MI_LRI_CMD(x) (REG_FIELD_GET(MI_OPCODE, (x)) == \
- REG_FIELD_GET(MI_OPCODE, MI_LOAD_REGISTER_IMM))
-
-static u32 xe_oa_context_image_offset(struct xe_oa_stream *stream, u32 reg)
-{
- struct xe_lrc *lrc = stream->exec_q->lrc[0];
- u32 len = (xe_gt_lrc_size(stream->gt, stream->hwe->class) +
- lrc->ring.size) / sizeof(u32);
- u32 offset = xe_lrc_regs_offset(lrc) / sizeof(u32);
- u32 *state = (u32 *)lrc->bo->vmap.vaddr;
-
- if (drm_WARN_ON(&stream->oa->xe->drm, !state))
- return U32_MAX;
-
- for (; offset < len; ) {
- if (IS_MI_LRI_CMD(state[offset])) {
- /*
- * We expect reg-value pairs in MI_LRI command, so
- * MI_LRI_LEN() should be even
- */
- drm_WARN_ON(&stream->oa->xe->drm,
- MI_LRI_LEN(state[offset]) & 0x1);
-
- if (xe_oa_find_reg_in_lri(state, reg, &offset, len))
- break;
- } else {
- offset++;
- }
- }
-
- return offset < len ? offset : U32_MAX;
-}
-
-static int xe_oa_set_ctx_ctrl_offset(struct xe_oa_stream *stream)
-{
- struct xe_reg reg = OACTXCONTROL(stream->hwe->mmio_base);
- u32 offset = stream->oa->ctx_oactxctrl_offset[stream->hwe->class];
-
- /* Do this only once. Failure is stored as offset of U32_MAX */
- if (offset)
- goto exit;
-
- offset = xe_oa_context_image_offset(stream, reg.addr);
- stream->oa->ctx_oactxctrl_offset[stream->hwe->class] = offset;
-
- drm_dbg(&stream->oa->xe->drm, "%s oa ctx control at 0x%08x dword offset\n",
- stream->hwe->name, offset);
-exit:
- return offset && offset != U32_MAX ? 0 : -ENODEV;
-}
-
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
struct xe_oa_unit *u = param->hwe->oa_unit;
struct xe_gt *gt = param->hwe->gt;
+ unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
@@ -1356,9 +1689,14 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
stream->sample = param->sample;
- stream->periodic = param->period_exponent > 0;
+ stream->periodic = param->period_exponent >= 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
+ stream->wait_num_reports = param->wait_num_reports;
+
+ stream->xef = xe_file_get(param->xef);
+ stream->num_syncs = param->num_syncs;
+ stream->syncs = param->syncs;
/*
* For Xe2+, when overrun mode is enabled, there are no partial reports at the end
@@ -1368,20 +1706,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
- XE_OA_BUFFER_SIZE - XE_OA_BUFFER_SIZE % stream->oa_buffer.format->size;
+ param->oa_buffer_size -
+ param->oa_buffer_size % stream->oa_buffer.format->size;
else
- stream->oa_buffer.circ_size = XE_OA_BUFFER_SIZE;
-
- if (stream->exec_q && engine_supports_mi_query(stream->hwe)) {
- /* If we don't find the context offset, just return error */
- ret = xe_oa_set_ctx_ctrl_offset(stream);
- if (ret) {
- drm_err(&stream->oa->xe->drm,
- "xe_oa_set_ctx_ctrl_offset failed for %s\n",
- stream->hwe->name);
- goto exit;
- }
- }
+ stream->oa_buffer.circ_size = param->oa_buffer_size;
stream->oa_config = xe_oa_get_oa_config(stream->oa, param->metric_set);
if (!stream->oa_config) {
@@ -1391,12 +1719,10 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
}
/*
- * Wa_1509372804:pvc
- *
* GuC reset of engines causes OA to lose configuration
* state. Prevent this by overriding GUCRC mode.
*/
- if (stream->oa->xe->info.platform == XE_PVC) {
+ if (XE_WA(stream->gt, 1509372804)) {
ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
SLPC_GUCRC_MODE_GUCRC_NO_RC6);
if (ret)
@@ -1407,9 +1733,13 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
/* Take runtime pm ref and forcewake to disable RC6 */
xe_pm_runtime_get(stream->oa->xe);
- XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ ret = -ETIMEDOUT;
+ goto err_fw_put;
+ }
- ret = xe_oa_alloc_oa_buffer(stream);
+ ret = xe_oa_alloc_oa_buffer(stream, param->oa_buffer_size);
if (ret)
goto err_fw_put;
@@ -1434,8 +1764,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
WRITE_ONCE(u->exclusive_stream, stream);
- hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- stream->poll_check_timer.function = xe_oa_poll_check_timer_cb;
+ hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
init_waitqueue_head(&stream->poll_wq);
spin_lock_init(&stream->oa_buffer.ptr_lock);
@@ -1449,13 +1779,14 @@ err_put_k_exec_q:
err_free_oa_buf:
xe_oa_free_oa_buffer(stream);
err_fw_put:
- XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(stream->oa->xe);
if (stream->override_gucrc)
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
err_free_configs:
xe_oa_free_configs(stream);
exit:
+ xe_file_put(stream->xef);
return ret;
}
@@ -1523,23 +1854,14 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
{
u32 reg, shift;
- /*
- * Wa_18013179988:dg2
- * Wa_14015568240:pvc
- * Wa_14015846243:mtl
- */
- switch (gt_to_xe(gt)->info.platform) {
- case XE_DG2:
- case XE_PVC:
- case XE_METEORLAKE:
+ if (XE_WA(gt, 18013179988) || XE_WA(gt, 14015568240)) {
xe_pm_runtime_get(gt_to_xe(gt));
- reg = xe_mmio_read32(gt, RPM_CONFIG0);
+ reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
shift = REG_FIELD_GET(RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK, reg);
return gt->info.reference_clock << (3 - shift);
-
- default:
+ } else {
return gt->info.reference_clock;
}
}
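/*
 * Worked example for the workaround path above: with an illustrative 19.2 MHz
 * reference clock (19200000) and CTC_SHIFT_PARAMETER == 1, the reported OA
 * timestamp frequency is 19200000 << (3 - 1) == 76800000, i.e. 76.8 MHz.
 */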
@@ -1565,27 +1887,6 @@ static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
}
}
-static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *name)
-{
- u32 counter_size = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SIZE, fmt);
- u32 counter_sel = FIELD_GET(DRM_XE_OA_FORMAT_MASK_COUNTER_SEL, fmt);
- u32 bc_report = FIELD_GET(DRM_XE_OA_FORMAT_MASK_BC_REPORT, fmt);
- u32 type = FIELD_GET(DRM_XE_OA_FORMAT_MASK_FMT_TYPE, fmt);
- int idx;
-
- for_each_set_bit(idx, oa->format_mask, __XE_OA_FORMAT_MAX) {
- const struct xe_oa_format *f = &oa->oa_formats[idx];
-
- if (counter_size == f->counter_size && bc_report == f->bc_report &&
- type == f->type && counter_sel == f->counter_select) {
- *name = idx;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
/**
* xe_oa_unit_id - Return OA unit ID for a hardware engine
* @hwe: @xe_hw_engine
@@ -1632,155 +1933,6 @@ out:
return ret;
}
-static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- if (value >= oa->oa_unit_ids) {
- drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
- return -EINVAL;
- }
- param->oa_unit_id = value;
- return 0;
-}
-
-static int xe_oa_set_prop_sample_oa(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->sample = value;
- return 0;
-}
-
-static int xe_oa_set_prop_metric_set(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->metric_set = value;
- return 0;
-}
-
-static int xe_oa_set_prop_oa_format(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- int ret = decode_oa_format(oa, value, &param->oa_format);
-
- if (ret) {
- drm_dbg(&oa->xe->drm, "Unsupported OA report format %#llx\n", value);
- return ret;
- }
- return 0;
-}
-
-static int xe_oa_set_prop_oa_exponent(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
-#define OA_EXPONENT_MAX 31
-
- if (value > OA_EXPONENT_MAX) {
- drm_dbg(&oa->xe->drm, "OA timer exponent too high (> %u)\n", OA_EXPONENT_MAX);
- return -EINVAL;
- }
- param->period_exponent = value;
- return 0;
-}
-
-static int xe_oa_set_prop_disabled(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->disabled = value;
- return 0;
-}
-
-static int xe_oa_set_prop_exec_queue_id(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->exec_queue_id = value;
- return 0;
-}
-
-static int xe_oa_set_prop_engine_instance(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->engine_instance = value;
- return 0;
-}
-
-static int xe_oa_set_no_preempt(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param)
-{
- param->no_preempt = value;
- return 0;
-}
-
-typedef int (*xe_oa_set_property_fn)(struct xe_oa *oa, u64 value,
- struct xe_oa_open_param *param);
-static const xe_oa_set_property_fn xe_oa_set_property_funcs[] = {
- [DRM_XE_OA_PROPERTY_OA_UNIT_ID] = xe_oa_set_prop_oa_unit_id,
- [DRM_XE_OA_PROPERTY_SAMPLE_OA] = xe_oa_set_prop_sample_oa,
- [DRM_XE_OA_PROPERTY_OA_METRIC_SET] = xe_oa_set_prop_metric_set,
- [DRM_XE_OA_PROPERTY_OA_FORMAT] = xe_oa_set_prop_oa_format,
- [DRM_XE_OA_PROPERTY_OA_PERIOD_EXPONENT] = xe_oa_set_prop_oa_exponent,
- [DRM_XE_OA_PROPERTY_OA_DISABLED] = xe_oa_set_prop_disabled,
- [DRM_XE_OA_PROPERTY_EXEC_QUEUE_ID] = xe_oa_set_prop_exec_queue_id,
- [DRM_XE_OA_PROPERTY_OA_ENGINE_INSTANCE] = xe_oa_set_prop_engine_instance,
- [DRM_XE_OA_PROPERTY_NO_PREEMPT] = xe_oa_set_no_preempt,
-};
-
-static int xe_oa_user_ext_set_property(struct xe_oa *oa, u64 extension,
- struct xe_oa_open_param *param)
-{
- u64 __user *address = u64_to_user_ptr(extension);
- struct drm_xe_ext_set_property ext;
- int err;
- u32 idx;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_DBG(oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(oa->xe, ext.property >= ARRAY_SIZE(xe_oa_set_property_funcs)) ||
- XE_IOCTL_DBG(oa->xe, ext.pad))
- return -EINVAL;
-
- idx = array_index_nospec(ext.property, ARRAY_SIZE(xe_oa_set_property_funcs));
- return xe_oa_set_property_funcs[idx](oa, ext.value, param);
-}
-
-typedef int (*xe_oa_user_extension_fn)(struct xe_oa *oa, u64 extension,
- struct xe_oa_open_param *param);
-static const xe_oa_user_extension_fn xe_oa_user_extension_funcs[] = {
- [DRM_XE_OA_EXTENSION_SET_PROPERTY] = xe_oa_user_ext_set_property,
-};
-
-#define MAX_USER_EXTENSIONS 16
-static int xe_oa_user_extensions(struct xe_oa *oa, u64 extension, int ext_number,
- struct xe_oa_open_param *param)
-{
- u64 __user *address = u64_to_user_ptr(extension);
- struct drm_xe_user_extension ext;
- int err;
- u32 idx;
-
- if (XE_IOCTL_DBG(oa->xe, ext_number >= MAX_USER_EXTENSIONS))
- return -E2BIG;
-
- err = __copy_from_user(&ext, address, sizeof(ext));
- if (XE_IOCTL_DBG(oa->xe, err))
- return -EFAULT;
-
- if (XE_IOCTL_DBG(oa->xe, ext.pad) ||
- XE_IOCTL_DBG(oa->xe, ext.name >= ARRAY_SIZE(xe_oa_user_extension_funcs)))
- return -EINVAL;
-
- idx = array_index_nospec(ext.name, ARRAY_SIZE(xe_oa_user_extension_funcs));
- err = xe_oa_user_extension_funcs[idx](oa, extension, param);
- if (XE_IOCTL_DBG(oa->xe, err))
- return err;
-
- if (ext.next_extension)
- return xe_oa_user_extensions(oa, ext.next_extension, ++ext_number, param);
-
- return 0;
-}
-
/**
* xe_oa_stream_open_ioctl - Opens an OA stream
* @dev: @drm_device
@@ -1806,7 +1958,9 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
return -ENODEV;
}
- ret = xe_oa_user_extensions(oa, data, 0, &param);
+ param.xef = xef;
+ param.period_exponent = -1;
+ ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@@ -1815,8 +1969,8 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
if (XE_IOCTL_DBG(oa->xe, !param.exec_q))
return -ENOENT;
- if (param.exec_q->width > 1)
- drm_dbg(&oa->xe->drm, "exec_q->width > 1, programming only exec_q->lrc[0]\n");
+ if (XE_IOCTL_DBG(oa->xe, param.exec_q->width > 1))
+ return -EOPNOTSUPP;
}
/*
@@ -1860,7 +2014,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
- if (param.period_exponent > 0) {
+ if (param.period_exponent >= 0) {
u64 oa_period, oa_freq_hz;
/* Requesting samples from OAG buffer is a privileged operation */
@@ -1874,11 +2028,35 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
drm_dbg(&oa->xe->drm, "Using periodic sampling freq %lld Hz\n", oa_freq_hz);
}
+ if (!param.oa_buffer_size)
+ param.oa_buffer_size = DEFAULT_XE_OA_BUFFER_SIZE;
+
+ if (!param.wait_num_reports)
+ param.wait_num_reports = 1;
+ if (param.wait_num_reports > param.oa_buffer_size / f->size) {
+ drm_dbg(&oa->xe->drm, "wait_num_reports %d exceeds OA buffer capacity\n",
+	  param.wait_num_reports);
+ ret = -EINVAL;
+ goto err_exec_q;
+ }
+
+ ret = xe_oa_parse_syncs(oa, &param);
+ if (ret)
+ goto err_exec_q;
+
mutex_lock(&param.hwe->gt->oa.gt_lock);
ret = xe_oa_stream_open_ioctl_locked(oa, &param);
mutex_unlock(&param.hwe->gt->oa.gt_lock);
+ if (ret < 0)
+ goto err_sync_cleanup;
+
+ return ret;
+
+err_sync_cleanup:
+ while (param.num_syncs--)
+ xe_sync_entry_cleanup(&param.syncs[param.num_syncs]);
+ kfree(param.syncs);
err_exec_q:
- if (ret < 0 && param.exec_q)
+ if (param.exec_q)
xe_exec_queue_put(param.exec_q);
return ret;
}
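/*
 * Defaults applied above when the corresponding property is omitted:
 * oa_buffer_size falls back to DEFAULT_XE_OA_BUFFER_SIZE (16M) and
 * wait_num_reports to 1, i.e. pollin is signalled on the first report.
 */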
@@ -1978,6 +2156,7 @@ static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
+ { .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
{ .start = 0xE590, .end = 0xE590 }, /* TDL_LSC_LAT_MEASURE_TDL_GFX */
{ .start = 0x13000, .end = 0x137FC }, /* PES_0_PESL0 - PES_63_UPPER_PESL3 */
@@ -2042,6 +2221,7 @@ addr_err:
kfree(oa_regs);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_oa_alloc_regs, ERRNO);
static ssize_t show_dynamic_id(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -2101,7 +2281,7 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi
return -EACCES;
}
- err = __copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
+ err = copy_from_user(&param, u64_to_user_ptr(data), sizeof(param));
if (XE_IOCTL_DBG(oa->xe, err))
return -EFAULT;
@@ -2234,36 +2414,36 @@ err_unlock:
return ret;
}
+static void xe_oa_unregister(void *arg)
+{
+ struct xe_oa *oa = arg;
+
+ if (!oa->metrics_kobj)
+ return;
+
+ kobject_put(oa->metrics_kobj);
+ oa->metrics_kobj = NULL;
+}
+
/**
* xe_oa_register - Xe OA registration
* @xe: @xe_device
*
* Exposes the metrics sysfs directory upon completion of module initialization
*/
-void xe_oa_register(struct xe_device *xe)
+int xe_oa_register(struct xe_device *xe)
{
struct xe_oa *oa = &xe->oa;
if (!oa->xe)
- return;
+ return 0;
oa->metrics_kobj = kobject_create_and_add("metrics",
&xe->drm.primary->kdev->kobj);
-}
-
-/**
- * xe_oa_unregister - Xe OA de-registration
- * @xe: @xe_device
- */
-void xe_oa_unregister(struct xe_device *xe)
-{
- struct xe_oa *oa = &xe->oa;
-
if (!oa->metrics_kobj)
- return;
+ return -ENOMEM;
- kobject_put(oa->metrics_kobj);
- oa->metrics_kobj = NULL;
+ return devm_add_action_or_reset(xe->drm.dev, xe_oa_unregister, oa);
}
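/*
 * With this devm action (and the xe_oa_fini action registered further down),
 * the explicit unregister/fini entry points are gone: cleanup now runs
 * automatically when the underlying device is released.
 */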
static u32 num_oa_units_per_gt(struct xe_gt *gt)
@@ -2348,8 +2528,10 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
u->type = DRM_XE_OA_UNIT_TYPE_OAM;
}
+ xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
+
/* Ensure MMIO trigger remains disabled till there is a stream */
- xe_mmio_write32(gt, u->regs.oa_debug,
+ xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));
/* Set oa_unit_ids now to ensure ids remain contiguous */
@@ -2450,6 +2632,27 @@ static void xe_oa_init_supported_formats(struct xe_oa *oa)
}
}
+static int destroy_config(int id, void *p, void *data)
+{
+ xe_oa_config_put(p);
+
+ return 0;
+}
+
+static void xe_oa_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct xe_oa *oa = &xe->oa;
+
+ if (!oa->xe)
+ return;
+
+ idr_for_each(&oa->metrics_idr, destroy_config, oa);
+ idr_destroy(&oa->metrics_idr);
+
+ oa->xe = NULL;
+}
+
/**
* xe_oa_init - OA initialization during device probe
* @xe: @xe_device
@@ -2481,31 +2684,10 @@ int xe_oa_init(struct xe_device *xe)
}
xe_oa_init_supported_formats(oa);
- return 0;
-exit:
- oa->xe = NULL;
- return ret;
-}
-static int destroy_config(int id, void *p, void *data)
-{
- xe_oa_config_put(p);
- return 0;
-}
-
-/**
- * xe_oa_fini - OA de-initialization during device remove
- * @xe: @xe_device
- */
-void xe_oa_fini(struct xe_device *xe)
-{
- struct xe_oa *oa = &xe->oa;
-
- if (!oa->xe)
- return;
-
- idr_for_each(&oa->metrics_idr, destroy_config, oa);
- idr_destroy(&oa->metrics_idr);
+ return devm_add_action_or_reset(xe->drm.dev, xe_oa_fini, xe);
+exit:
oa->xe = NULL;
+ return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_oa.h b/drivers/gpu/drm/xe/xe_oa.h
index 87a38820c317..e510826f9efc 100644
--- a/drivers/gpu/drm/xe/xe_oa.h
+++ b/drivers/gpu/drm/xe/xe_oa.h
@@ -15,9 +15,7 @@ struct xe_gt;
struct xe_hw_engine;
int xe_oa_init(struct xe_device *xe);
-void xe_oa_fini(struct xe_device *xe);
-void xe_oa_register(struct xe_device *xe);
-void xe_oa_unregister(struct xe_device *xe);
+int xe_oa_register(struct xe_device *xe);
int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
int xe_oa_remove_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *file);
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 8862eca73fbe..52e33c37d5ee 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -15,7 +15,7 @@
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
-#define XE_OA_BUFFER_SIZE SZ_16M
+#define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M
enum xe_oa_report_header {
HDR_32_BIT = 0,
@@ -138,9 +138,6 @@ struct xe_oa {
/** @metrics_idr: List of dynamic configurations (struct xe_oa_config) */
struct idr metrics_idr;
- /** @ctx_oactxctrl_offset: offset of OACTXCONTROL register in context image */
- u32 ctx_oactxctrl_offset[XE_ENGINE_CLASS_MAX];
-
/** @oa_formats: tracks all OA formats across platforms */
const struct xe_oa_format *oa_formats;
@@ -218,6 +215,9 @@ struct xe_oa_stream {
/** @pollin: Whether there is data available to read */
bool pollin;
+ /** @wait_num_reports: Number of reports to wait for before signalling pollin */
+ int wait_num_reports;
+
/** @periodic: Whether periodic sampling is currently enabled */
bool periodic;
@@ -238,5 +238,17 @@ struct xe_oa_stream {
/** @no_preempt: Whether preemption and timeslicing is disabled for stream exec_q */
u32 no_preempt;
+
+ /** @xef: xe_file with which the stream was opened */
+ struct xe_file *xef;
+
+ /** @last_fence: fence to use in stream destroy when needed */
+ struct dma_fence *last_fence;
+
+ /** @num_syncs: size of @syncs array */
+ u32 num_syncs;
+
+ /** @syncs: syncs to wait on and to signal */
+ struct xe_sync_entry *syncs;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index 8ec1b84cbb9e..e3f9b546207e 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -8,6 +8,7 @@
#include <uapi/drm/xe_drm.h>
+#include "xe_eu_stall.h"
#include "xe_oa.h"
#include "xe_observation.h"
@@ -29,6 +30,17 @@ static int xe_oa_ioctl(struct drm_device *dev, struct drm_xe_observation_param *
}
}
+static int xe_eu_stall_ioctl(struct drm_device *dev, struct drm_xe_observation_param *arg,
+ struct drm_file *file)
+{
+ switch (arg->observation_op) {
+ case DRM_XE_OBSERVATION_OP_STREAM_OPEN:
+ return xe_eu_stall_stream_open(dev, arg->param, file);
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* xe_observation_ioctl - The top level observation layer ioctl
* @dev: @drm_device
@@ -51,12 +63,14 @@ int xe_observation_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
switch (arg->observation_type) {
case DRM_XE_OBSERVATION_TYPE_OA:
return xe_oa_ioctl(dev, arg, file);
+ case DRM_XE_OBSERVATION_TYPE_EU_STALL:
+ return xe_eu_stall_ioctl(dev, arg, file);
default:
return -EINVAL;
}
}
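/*
 * A minimal userspace sketch of reaching the OA open path through this
 * dispatcher (the DRM_IOCTL_XE_OBSERVATION macro name and the full
 * drm_xe_observation_param layout are taken from the xe uapi header, not
 * from this diff):
 *
 *	struct drm_xe_observation_param p = {
 *		.observation_type = DRM_XE_OBSERVATION_TYPE_OA,
 *		.observation_op = DRM_XE_OBSERVATION_OP_STREAM_OPEN,
 *		.param = (__u64)(uintptr_t)&metric,
 *	};
 *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_XE_OBSERVATION, &p);
 *
 * where &metric is the head of a DRM_XE_OA_EXTENSION_SET_PROPERTY chain as
 * sketched near xe_oa_user_extensions(). On success the ioctl returns the
 * stream fd, which then supports read()/poll() and the stream ioctls.
 */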
-static struct ctl_table observation_ctl_table[] = {
+static const struct ctl_table observation_ctl_table[] = {
{
.procname = "observation_paranoid",
.data = &xe_observation_paranoid,
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index f291a1730024..30fdbdb9341e 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -100,6 +100,10 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
* Reserved entries should be programmed with the maximum caching, minimum
* coherency (which matches an all-0's encoding), so we can just omit them
* in the table.
+ *
+ * Note: There is an implicit assumption in the driver that compression and
+ * coh_1way+ are mutually exclusive. If this is ever not true then userptr
+ * and imported dma-buf from external device will have uncleared ccs state.
*/
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
{ \
@@ -109,7 +113,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
REG_FIELD_PREP(XE2_L3_POLICY, l3_policy) | \
REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
- .coh_mode = __coh_mode ? XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
+ .coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? \
+ XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
}
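/*
 * BUILD_BUG_ON_ZERO() evaluates to 0 when its argument is false and breaks
 * the build otherwise, so the expression above both computes coh_mode and
 * enforces at compile time that no table entry combines comp_en with a
 * coh_1way+ coherency mode, matching the note added to the comment above.
 */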
static const struct xe_pat_table_entry xe2_pat_table[] = {
@@ -160,7 +165,7 @@ static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[
for (int i = 0; i < n_entries; i++) {
struct xe_reg reg = XE_REG(_PAT_INDEX(i));
- xe_mmio_write32(gt, reg, table[i].value);
+ xe_mmio_write32(&gt->mmio, reg, table[i].value);
}
}
@@ -177,25 +182,24 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta
static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
for (i = 0; i < xe->pat.n_entries; i++) {
- u32 pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ u32 pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
u8 mem_type = REG_FIELD_GET(XELP_MEM_TYPE_MASK, pat);
drm_printf(p, "PAT[%2d] = %s (%#8x)\n", i,
XELP_MEM_TYPE_STR_MAP[mem_type], pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xelp_pat_ops = {
@@ -206,11 +210,12 @@ static const struct xe_pat_ops xelp_pat_ops = {
static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
@@ -224,9 +229,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
XELP_MEM_TYPE_STR_MAP[mem_type], pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xehp_pat_ops = {
@@ -237,11 +240,12 @@ static const struct xe_pat_ops xehp_pat_ops = {
static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
@@ -253,9 +257,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
REG_FIELD_GET(XEHPC_CLOS_LEVEL_MASK, pat), pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xehpc_pat_ops = {
@@ -266,11 +268,12 @@ static const struct xe_pat_ops xehpc_pat_ops = {
static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
@@ -278,7 +281,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
u32 pat;
if (xe_gt_is_media_type(gt))
- pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
else
pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
@@ -287,9 +290,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
REG_FIELD_GET(XELPG_INDEX_COH_MODE_MASK, pat), pat);
}
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
/*
@@ -316,27 +317,28 @@ static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry
int n_entries)
{
program_pat(gt, table, n_entries);
- xe_mmio_write32(gt, XE_REG(_PAT_ATS), xe2_pat_ats.value);
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value);
if (IS_DGFX(gt_to_xe(gt)))
- xe_mmio_write32(gt, XE_REG(_PAT_PTA), xe2_pat_pta.value);
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value);
}
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
- int i, err;
+ unsigned int fw_ref;
u32 pat;
+ int i;
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- goto err_fw;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return;
drm_printf(p, "PAT table:\n");
for (i = 0; i < xe->pat.n_entries; i++) {
if (xe_gt_is_media_type(gt))
- pat = xe_mmio_read32(gt, XE_REG(_PAT_INDEX(i)));
+ pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_INDEX(i)));
else
pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
@@ -355,7 +357,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
* PPGTT entries.
*/
if (xe_gt_is_media_type(gt))
- pat = xe_mmio_read32(gt, XE_REG(_PAT_PTA));
+ pat = xe_mmio_read32(&gt->mmio, XE_REG(_PAT_PTA));
else
pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));
@@ -369,9 +371,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
REG_FIELD_GET(XE2_COH_MODE, pat),
pat);
- err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
-err_fw:
- xe_assert(xe, !err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
static const struct xe_pat_ops xe2_pat_ops = {
@@ -382,7 +382,7 @@ static const struct xe_pat_ops xe2_pat_ops = {
void xe_pat_init_early(struct xe_device *xe)
{
- if (GRAPHICS_VER(xe) == 20) {
+ if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 5e962e72c97e..ac4beaed58ff 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -13,7 +13,7 @@
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
-#include <drm/intel/xe_pciids.h>
+#include <drm/intel/pciids.h>
#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
@@ -30,6 +30,7 @@
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_step.h"
+#include "xe_survivability_mode.h"
#include "xe_tile.h"
enum toggle_d3cold {
@@ -45,24 +46,30 @@ struct xe_subplatform_desc {
struct xe_device_desc {
/* Should only ever be set for platforms without GMD_ID */
- const struct xe_graphics_desc *graphics;
+ const struct xe_ip *pre_gmdid_graphics_ip;
/* Should only ever be set for platforms without GMD_ID */
- const struct xe_media_desc *media;
+ const struct xe_ip *pre_gmdid_media_ip;
const char *platform_name;
const struct xe_subplatform_desc *subplatforms;
enum xe_platform platform;
+ u8 dma_mask_size;
+ u8 max_remote_tiles:2;
+
u8 require_force_probe:1;
u8 is_dgfx:1;
u8 has_display:1;
+ u8 has_fan_control:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
u8 has_llc:1;
- u8 has_mmio_ext:1;
+ u8 has_mbx_power_limits:1;
+ u8 has_pxp:1;
u8 has_sriov:1;
+ u8 needs_scratch:1;
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
@@ -78,41 +85,18 @@ __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
#define NOP(x) x
static const struct xe_graphics_desc graphics_xelp = {
- .name = "Xe_LP",
- .ver = 12,
- .rel = 0,
-
- .hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
-
- .dma_mask_size = 39,
- .va_bits = 48,
- .vm_max_level = 3,
-};
-
-static const struct xe_graphics_desc graphics_xelpp = {
- .name = "Xe_LP+",
- .ver = 12,
- .rel = 10,
-
.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
- .dma_mask_size = 39,
.va_bits = 48,
.vm_max_level = 3,
};
#define XE_HP_FEATURES \
.has_range_tlb_invalidation = true, \
- .has_flat_ccs = true, \
- .dma_mask_size = 46, \
.va_bits = 48, \
.vm_max_level = 3
static const struct xe_graphics_desc graphics_xehpg = {
- .name = "Xe_HPG",
- .ver = 12,
- .rel = 55,
-
.hw_engine_mask =
BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
BIT(XE_HW_ENGINE_CCS0) | BIT(XE_HW_ENGINE_CCS1) |
@@ -120,13 +104,11 @@ static const struct xe_graphics_desc graphics_xehpg = {
XE_HP_FEATURES,
.vram_flags = XE_VRAM_FLAGS_NEED64K,
+
+ .has_flat_ccs = 1,
};
static const struct xe_graphics_desc graphics_xehpc = {
- .name = "Xe_HPC",
- .ver = 12,
- .rel = 60,
-
.hw_engine_mask =
BIT(XE_HW_ENGINE_BCS0) | BIT(XE_HW_ENGINE_BCS1) |
BIT(XE_HW_ENGINE_BCS2) | BIT(XE_HW_ENGINE_BCS3) |
@@ -137,36 +119,31 @@ static const struct xe_graphics_desc graphics_xehpc = {
BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
XE_HP_FEATURES,
- .dma_mask_size = 52,
- .max_remote_tiles = 1,
.va_bits = 57,
.vm_max_level = 4,
.vram_flags = XE_VRAM_FLAGS_NEED64K,
.has_asid = 1,
.has_atomic_enable_pte_bit = 1,
- .has_flat_ccs = 0,
.has_usm = 1,
};
static const struct xe_graphics_desc graphics_xelpg = {
- .name = "Xe_LPG",
.hw_engine_mask =
BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0) |
BIT(XE_HW_ENGINE_CCS0),
XE_HP_FEATURES,
- .has_flat_ccs = 0,
};
#define XE2_GFX_FEATURES \
- .dma_mask_size = 46, \
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
.has_flat_ccs = 1, \
.has_indirect_ring_state = 1, \
.has_range_tlb_invalidation = 1, \
.has_usm = 1, \
+ .has_64bit_timestamp = 1, \
.va_bits = 48, \
.vm_max_level = 4, \
.hw_engine_mask = \
@@ -175,71 +152,78 @@ static const struct xe_graphics_desc graphics_xelpg = {
GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0)
static const struct xe_graphics_desc graphics_xe2 = {
- .name = "Xe2_LPG / Xe2_HPG",
-
XE2_GFX_FEATURES,
};
static const struct xe_media_desc media_xem = {
- .name = "Xe_M",
- .ver = 12,
- .rel = 0,
-
- .hw_engine_mask =
- GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
- GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
-};
-
-static const struct xe_media_desc media_xehpm = {
- .name = "Xe_HPM",
- .ver = 12,
- .rel = 55,
-
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0),
};
static const struct xe_media_desc media_xelpmp = {
- .name = "Xe_LPM+",
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
BIT(XE_HW_ENGINE_GSCCS0)
};
-static const struct xe_media_desc media_xe2 = {
- .name = "Xe2_LPM / Xe2_HPM",
- .hw_engine_mask =
- GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
- GENMASK(XE_HW_ENGINE_VECS3, XE_HW_ENGINE_VECS0) |
- BIT(XE_HW_ENGINE_GSCCS0)
+/* Pre-GMDID Graphics IPs */
+static const struct xe_ip graphics_ip_xelp = { 1200, "Xe_LP", &graphics_xelp };
+static const struct xe_ip graphics_ip_xelpp = { 1210, "Xe_LP+", &graphics_xelp };
+static const struct xe_ip graphics_ip_xehpg = { 1255, "Xe_HPG", &graphics_xehpg };
+static const struct xe_ip graphics_ip_xehpc = { 1260, "Xe_HPC", &graphics_xehpc };
+
+/* GMDID-based Graphics IPs */
+static const struct xe_ip graphics_ips[] = {
+ { 1270, "Xe_LPG", &graphics_xelpg },
+ { 1271, "Xe_LPG", &graphics_xelpg },
+ { 1274, "Xe_LPG+", &graphics_xelpg },
+ { 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2004, "Xe2_LPG", &graphics_xe2 },
+ { 3000, "Xe3_LPG", &graphics_xe2 },
+ { 3001, "Xe3_LPG", &graphics_xe2 },
+};
+
+/* Pre-GMDID Media IPs */
+static const struct xe_ip media_ip_xem = { 1200, "Xe_M", &media_xem };
+static const struct xe_ip media_ip_xehpm = { 1255, "Xe_HPM", &media_xem };
+
+/* GMDID-based Media IPs */
+static const struct xe_ip media_ips[] = {
+ { 1300, "Xe_LPM+", &media_xelpmp },
+ { 1301, "Xe2_HPM", &media_xelpmp },
+ { 2000, "Xe2_LPM", &media_xelpmp },
+ { 3000, "Xe3_LPM", &media_xelpmp },
};
static const struct xe_device_desc tgl_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(TIGERLAKE),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
};
static const struct xe_device_desc rkl_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ROCKETLAKE),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
};
-static const u16 adls_rpls_ids[] = { XE_RPLS_IDS(NOP), 0 };
+static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
static const struct xe_device_desc adl_s_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_S),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
@@ -249,12 +233,13 @@ static const struct xe_device_desc adl_s_desc = {
},
};
-static const u16 adlp_rplu_ids[] = { XE_RPLU_IDS(NOP), 0 };
+static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
static const struct xe_device_desc adl_p_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_P),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
@@ -265,9 +250,10 @@ static const struct xe_device_desc adl_p_desc = {
};
static const struct xe_device_desc adl_n_desc = {
- .graphics = &graphics_xelp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelp,
+ .pre_gmdid_media_ip = &media_ip_xem,
PLATFORM(ALDERLAKE_N),
+ .dma_mask_size = 39,
.has_display = true,
.has_llc = true,
.require_force_probe = true,
@@ -277,18 +263,19 @@ static const struct xe_device_desc adl_n_desc = {
.is_dgfx = 1
static const struct xe_device_desc dg1_desc = {
- .graphics = &graphics_xelpp,
- .media = &media_xem,
+ .pre_gmdid_graphics_ip = &graphics_ip_xelpp,
+ .pre_gmdid_media_ip = &media_ip_xem,
DGFX_FEATURES,
PLATFORM(DG1),
+ .dma_mask_size = 39,
.has_display = true,
.has_heci_gscfi = 1,
.require_force_probe = true,
};
-static const u16 dg2_g10_ids[] = { XE_DG2_G10_IDS(NOP), XE_ATS_M150_IDS(NOP), 0 };
-static const u16 dg2_g11_ids[] = { XE_DG2_G11_IDS(NOP), XE_ATS_M75_IDS(NOP), 0 };
-static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };
+static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
+static const u16 dg2_g11_ids[] = { INTEL_DG2_G11_IDS(NOP), INTEL_ATS_M75_IDS(NOP), 0 };
+static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
#define DG2_FEATURES \
DGFX_FEATURES, \
@@ -302,8 +289,9 @@ static const u16 dg2_g12_ids[] = { XE_DG2_G12_IDS(NOP), 0 };
}
static const struct xe_device_desc ats_m_desc = {
- .graphics = &graphics_xehpg,
- .media = &media_xehpm,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpg,
+ .pre_gmdid_media_ip = &media_ip_xehpm,
+ .dma_mask_size = 46,
.require_force_probe = true,
DG2_FEATURES,
@@ -311,65 +299,68 @@ static const struct xe_device_desc ats_m_desc = {
};
static const struct xe_device_desc dg2_desc = {
- .graphics = &graphics_xehpg,
- .media = &media_xehpm,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpg,
+ .pre_gmdid_media_ip = &media_ip_xehpm,
+ .dma_mask_size = 46,
.require_force_probe = true,
DG2_FEATURES,
.has_display = true,
+ .has_fan_control = true,
+ .has_mbx_power_limits = false,
};
static const __maybe_unused struct xe_device_desc pvc_desc = {
- .graphics = &graphics_xehpc,
+ .pre_gmdid_graphics_ip = &graphics_ip_xehpc,
DGFX_FEATURES,
PLATFORM(PVC),
+ .dma_mask_size = 52,
.has_display = false,
.has_heci_gscfi = 1,
+ .max_remote_tiles = 1,
.require_force_probe = true,
+ .has_mbx_power_limits = false,
};
static const struct xe_device_desc mtl_desc = {
/* .graphics and .media determined via GMD_ID */
.require_force_probe = true,
PLATFORM(METEORLAKE),
+ .dma_mask_size = 46,
.has_display = true,
+ .has_pxp = true,
};
static const struct xe_device_desc lnl_desc = {
PLATFORM(LUNARLAKE),
+ .dma_mask_size = 46,
.has_display = true,
+ .has_pxp = true,
+ .needs_scratch = true,
};
static const struct xe_device_desc bmg_desc = {
DGFX_FEATURES,
PLATFORM(BATTLEMAGE),
+ .dma_mask_size = 46,
.has_display = true,
+ .has_fan_control = true,
+ .has_mbx_power_limits = true,
.has_heci_cscfi = 1,
+ .needs_scratch = true,
};
-#undef PLATFORM
-__diag_pop();
-
-/* Map of GMD_ID values to graphics IP */
-static const struct gmdid_map graphics_ip_map[] = {
- { 1270, &graphics_xelpg },
- { 1271, &graphics_xelpg },
- { 1274, &graphics_xelpg }, /* Xe_LPG+ */
- { 2001, &graphics_xe2 },
- { 2004, &graphics_xe2 },
-};
-
-/* Map of GMD_ID values to media IP */
-static const struct gmdid_map media_ip_map[] = {
- { 1300, &media_xelpmp },
- { 1301, &media_xe2 },
- { 2000, &media_xe2 },
+static const struct xe_device_desc ptl_desc = {
+ PLATFORM(PANTHERLAKE),
+ .dma_mask_size = 46,
+ .has_display = true,
+ .has_sriov = true,
+ .require_force_probe = true,
+ .needs_scratch = true,
};
-#define INTEL_VGA_DEVICE(id, info) { \
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, id), \
- PCI_BASE_CLASS_DISPLAY << 16, 0xff << 16, \
- (unsigned long) info }
+#undef PLATFORM
+__diag_pop();
/*
* Make sure any device matches here are from most specific to most
@@ -378,25 +369,26 @@ static const struct gmdid_map media_ip_map[] = {
* PCI ID matches, otherwise we'll use the wrong info struct above.
*/
static const struct pci_device_id pciidlist[] = {
- XE_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
- XE_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
- XE_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
- XE_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
- XE_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
- XE_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
- XE_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
- XE_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
- XE_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
- XE_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
- XE_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
- XE_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
- XE_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
+ INTEL_TGL_IDS(INTEL_VGA_DEVICE, &tgl_desc),
+ INTEL_RKL_IDS(INTEL_VGA_DEVICE, &rkl_desc),
+ INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &adl_n_desc),
+ INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &adl_p_desc),
+ INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &adl_s_desc),
+ INTEL_DG1_IDS(INTEL_VGA_DEVICE, &dg1_desc),
+ INTEL_ATS_M_IDS(INTEL_VGA_DEVICE, &ats_m_desc),
+ INTEL_ARL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
+ INTEL_DG2_IDS(INTEL_VGA_DEVICE, &dg2_desc),
+ INTEL_MTL_IDS(INTEL_VGA_DEVICE, &mtl_desc),
+ INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
+ INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
+ INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-#undef INTEL_VGA_DEVICE
-
/* is device_id present in comma separated list of ids */
static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
@@ -467,13 +459,15 @@ enum xe_gmdid_type {
static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct xe_reg gmdid_reg = GMD_ID;
u32 val;
KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);
if (IS_SRIOV_VF(xe)) {
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+
/*
* To get the value of the GMDID register, VFs must obtain it
* from the GuC using MMIO communication.
@@ -484,7 +478,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
* least basic xe_gt and xe_guc initialization.
*
* Since to obtain the value of GMDID_MEDIA we need to use the
- * media GuC, temporarly tweak the gt type.
+ * media GuC, temporarily tweak the gt type.
*/
xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
@@ -496,6 +490,7 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
gt->info.type = XE_GT_TYPE_MAIN;
}
+ xe_gt_mmio_init(gt);
xe_guc_comm_init_early(&gt->uc.guc);
/* Don't bother with GMDID if failed to negotiate the GuC ABI */
@@ -509,14 +504,17 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
gt->info.type = XE_GT_TYPE_UNINITIALIZED;
} else {
/*
- * We need to apply the GSI offset explicitly here as at this
- * point the xe_gt is not fully uninitialized and only basic
- * access to MMIO registers is possible.
+ * GMD_ID is a GT register, but at this point in the driver
+ * init we haven't fully initialized the GT yet so we need to
+ * read the register with the tile's MMIO accessor. That means
+ * we need to apply the GSI offset manually since it won't get
+ * automatically added as it would if we were using a GT mmio
+ * accessor.
*/
if (type == GMDID_MEDIA)
gmdid_reg.addr += MEDIA_GT_GSI_OFFSET;
- val = xe_mmio_read32(gt, gmdid_reg);
+ val = xe_mmio_read32(mmio, gmdid_reg);
}
*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
@@ -524,66 +522,49 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
}
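/*
 * Example of the encoding above: GMD_ID arch 12 / release 71 gives
 * verx100 == 1271, which handle_gmdid() matches to the Xe_LPG entry in
 * graphics_ips[]; arch 20 / release 04 gives 2004 and selects Xe2_LPG.
 */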
/*
- * Pre-GMD_ID platform: device descriptor already points to the appropriate
- * graphics descriptor. Simply forward the description and calculate the version
- * appropriately. "graphics" should be present in all such platforms, while
- * media is optional.
- */
-static void handle_pre_gmdid(struct xe_device *xe,
- const struct xe_graphics_desc *graphics,
- const struct xe_media_desc *media)
-{
- xe->info.graphics_verx100 = graphics->ver * 100 + graphics->rel;
-
- if (media)
- xe->info.media_verx100 = media->ver * 100 + media->rel;
-
-}
-
-/*
- * GMD_ID platform: read IP version from hardware and select graphics descriptor
+ * Read IP version from hardware and select graphics/media IP descriptors
* based on the result.
*/
static void handle_gmdid(struct xe_device *xe,
- const struct xe_graphics_desc **graphics,
- const struct xe_media_desc **media,
+ const struct xe_ip **graphics_ip,
+ const struct xe_ip **media_ip,
u32 *graphics_revid,
u32 *media_revid)
{
u32 ver;
+ *graphics_ip = NULL;
+ *media_ip = NULL;
+
read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
- for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) {
- if (ver == graphics_ip_map[i].ver) {
- xe->info.graphics_verx100 = ver;
- *graphics = graphics_ip_map[i].ip;
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
+ if (ver == graphics_ips[i].verx100) {
+ *graphics_ip = &graphics_ips[i];
break;
}
}
- if (!xe->info.graphics_verx100) {
+ if (!*graphics_ip) {
drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
ver / 100, ver % 100);
}
read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
-
/* Media may legitimately be fused off / not present */
if (ver == 0)
return;
- for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) {
- if (ver == media_ip_map[i].ver) {
- xe->info.media_verx100 = ver;
- *media = media_ip_map[i].ip;
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
+ if (ver == media_ips[i].verx100) {
+ *media_ip = &media_ips[i];
break;
}
}
- if (!xe->info.media_verx100) {
+ if (!*media_ip) {
drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
ver / 100, ver % 100);
}
@@ -604,19 +585,24 @@ static int xe_info_init_early(struct xe_device *xe,
xe->info.subplatform = subplatform_desc ?
subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
+ xe->info.dma_mask_size = desc->dma_mask_size;
xe->info.is_dgfx = desc->is_dgfx;
+ xe->info.has_fan_control = desc->has_fan_control;
+ xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
xe->info.has_llc = desc->has_llc;
- xe->info.has_mmio_ext = desc->has_mmio_ext;
+ xe->info.has_pxp = desc->has_pxp;
xe->info.has_sriov = desc->has_sriov;
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
+ xe->info.needs_scratch = desc->needs_scratch;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
desc->has_display;
+ xe->info.tile_count = 1 + desc->max_remote_tiles;
err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0);
if (err)
@@ -632,26 +618,31 @@ static int xe_info_init_early(struct xe_device *xe,
* present in device info.
*/
static int xe_info_init(struct xe_device *xe,
- const struct xe_graphics_desc *graphics_desc,
- const struct xe_media_desc *media_desc)
+ const struct xe_device_desc *desc)
{
u32 graphics_gmdid_revid = 0, media_gmdid_revid = 0;
+ const struct xe_ip *graphics_ip;
+ const struct xe_ip *media_ip;
+ const struct xe_graphics_desc *graphics_desc;
+ const struct xe_media_desc *media_desc;
struct xe_tile *tile;
struct xe_gt *gt;
u8 id;
/*
* If this platform supports GMD_ID, we'll detect the proper IP
- * descriptor to use from hardware registers. desc->graphics will only
- * ever be set at this point for platforms before GMD_ID. In that case
- * the IP descriptions and versions are simply derived from that.
+ * descriptor to use from hardware registers.
+ * desc->pre_gmdid_graphics_ip will only ever be set at this point for
+ * platforms before GMD_ID. In that case the IP descriptions and
+ * versions are simply derived from that.
*/
- if (graphics_desc) {
- handle_pre_gmdid(xe, graphics_desc, media_desc);
+ if (desc->pre_gmdid_graphics_ip) {
+ graphics_ip = desc->pre_gmdid_graphics_ip;
+ media_ip = desc->pre_gmdid_media_ip;
xe->info.step = xe_step_pre_gmdid_get(xe);
} else {
- xe_assert(xe, !media_desc);
- handle_gmdid(xe, &graphics_desc, &media_desc,
+ xe_assert(xe, !desc->pre_gmdid_media_ip);
+ handle_gmdid(xe, &graphics_ip, &media_ip,
&graphics_gmdid_revid, &media_gmdid_revid);
xe->info.step = xe_step_gmdid_get(xe,
graphics_gmdid_revid,
@@ -663,14 +654,22 @@ static int xe_info_init(struct xe_device *xe,
* error and we should abort driver load. Failing to detect media
* IP is non-fatal; we'll just proceed without enabling media support.
*/
- if (!graphics_desc)
+ if (!graphics_ip)
return -ENODEV;
- xe->info.graphics_name = graphics_desc->name;
- xe->info.media_name = media_desc ? media_desc->name : "none";
- xe->info.tile_mmio_ext_size = graphics_desc->tile_mmio_ext_size;
+ xe->info.graphics_verx100 = graphics_ip->verx100;
+ xe->info.graphics_name = graphics_ip->name;
+ graphics_desc = graphics_ip->desc;
+
+ if (media_ip) {
+ xe->info.media_verx100 = media_ip->verx100;
+ xe->info.media_name = media_ip->name;
+ media_desc = media_ip->desc;
+ } else {
+ xe->info.media_name = "none";
+ media_desc = NULL;
+ }
- xe->info.dma_mask_size = graphics_desc->dma_mask_size;
xe->info.vram_flags = graphics_desc->vram_flags;
xe->info.va_bits = graphics_desc->va_bits;
xe->info.vm_max_level = graphics_desc->vm_max_level;
@@ -678,20 +677,13 @@ static int xe_info_init(struct xe_device *xe,
xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
if (xe->info.platform != XE_PVC)
xe->info.has_device_atomics_on_smem = 1;
+
+ /* Runtime detection may change this later */
xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
+
xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
xe->info.has_usm = graphics_desc->has_usm;
-
- /*
- * All platforms have at least one primary GT. Any platform with media
- * version 13 or higher has an additional dedicated media GT. And
- * depending on the graphics IP there may be additional "remote tiles."
- * All of these together determine the overall GT count.
- *
- * FIXME: 'tile_count' here is misnamed since the rest of the driver
- * treats it as the number of GTs rather than just the number of tiles.
- */
- xe->info.tile_count = 1 + graphics_desc->max_remote_tiles;
+ xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
for_each_remote_tile(tile, xe, id) {
int err;
@@ -701,12 +693,19 @@ static int xe_info_init(struct xe_device *xe,
return err;
}
+ /*
+ * All platforms have at least one primary GT. Any platform with media
+ * version 13 or higher has an additional dedicated media GT. And
+ * depending on the graphics IP there may be additional "remote tiles."
+ * All of these together determine the overall GT count.
+ */
for_each_tile(tile, xe, id) {
gt = tile->primary_gt;
gt->info.id = xe->info.gt_count++;
gt->info.type = XE_GT_TYPE_MAIN;
gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
gt->info.engine_mask = graphics_desc->hw_engine_mask;
+
if (MEDIA_VER(xe) < 13 && media_desc)
gt->info.engine_mask |= media_desc->hw_engine_mask;
@@ -725,8 +724,6 @@ static int xe_info_init(struct xe_device *xe,
gt->info.type = XE_GT_TYPE_MEDIA;
gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
gt->info.engine_mask = media_desc->hw_engine_mask;
- gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
- gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
/*
* FIXME: At the moment multi-tile and standalone media are
@@ -743,20 +740,37 @@ static int xe_info_init(struct xe_device *xe,
static void xe_pci_remove(struct pci_dev *pdev)
{
- struct xe_device *xe;
-
- xe = pdev_to_xe_device(pdev);
- if (!xe) /* driver load aborted, nothing to cleanup */
- return;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
if (IS_SRIOV_PF(xe))
xe_pci_sriov_configure(pdev, 0);
+ if (xe_survivability_mode_is_enabled(xe))
+ return;
+
xe_device_remove(xe);
- xe_pm_runtime_fini(xe);
- pci_set_drvdata(pdev, NULL);
+ xe_pm_fini(xe);
}
+/*
+ * Probe the PCI device, initialize various parts of the driver.
+ *
+ * Fault injection is used to test the error paths of some initialization
+ * functions called either directly from xe_pci_probe() or indirectly for
+ * example through xe_device_probe(). Those functions use the kernel fault
+ * injection capabilities infrastructure, see
+ * Documentation/fault-injection/fault-injection.rst for details. The macro
+ * ALLOW_ERROR_INJECTION() is used to conditionally skip function execution
+ * at runtime and use a provided return value. The first requirement for
+ * error injectable functions is proper handling of the error code by the
+ * caller for recovery, which is always the case here. The second
+ * requirement is that no state is changed before the first error return.
+ * It is not strictly fulfilled for all initialization functions using the
+ * ALLOW_ERROR_INJECTION() macro but this is acceptable because for those
+ * error cases at probe time, the error code is simply propagated up by the
+ * caller. Therefore there is no consequence on those specific callers when
+ * function error injection skips the whole function.
+ */
static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct xe_device_desc *desc = (const void *)ent->driver_data;
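As a concrete illustration of the convention described in the comment above, an error-injectable init helper looks roughly like the sketch below. The helper name and its precondition are hypothetical; only the ALLOW_ERROR_INJECTION() annotation, the ERRNO class and the error-injection header are taken from this series. At runtime such a function can then be forced to fail through the fail_function fault-injection knobs in debugfs, as described in the referenced documentation.

#include <linux/error-injection.h>

/*
 * Hypothetical example: no state is modified before the first possible
 * error return, and the caller simply propagates the error code, so
 * skipping the whole body via fault injection is safe for probe testing.
 */
int xe_example_init_early(struct xe_device *xe)
{
	if (xe->info.skip_pcode)
		return -ENODEV;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_example_init_early, ERRNO); /* See xe_pci_probe() */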
@@ -805,10 +819,19 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
err = xe_device_probe_early(xe);
+ /*
+ * In Boot Survivability mode, no DRM card is exposed and the driver
+ * is loaded with the bare minimum needed to allow firmware to be
+ * flashed through MEI. Return success if survivability mode is
+ * enabled due to a pcode failure or configfs being set.
+ */
+ if (xe_survivability_mode_is_enabled(xe))
+ return 0;
+
if (err)
return err;
- err = xe_info_init(xe, desc->graphics, desc->media);
+ err = xe_info_init(xe, desc);
if (err)
return err;
@@ -893,9 +916,13 @@ static void d3cold_toggle(struct pci_dev *pdev, enum toggle_d3cold toggle)
static int xe_pci_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
int err;
- err = xe_pm_suspend(pdev_to_xe_device(pdev));
+ if (xe_survivability_mode_is_enabled(xe))
+ return -EBUSY;
+
+ err = xe_pm_suspend(xe);
if (err)
return err;
@@ -908,6 +935,7 @@ static int xe_pci_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index aaceee748287..8813efdcafbb 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -7,6 +7,8 @@
#include "xe_device.h"
#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_guc_engine_activity.h"
#include "xe_pci_sriov.h"
#include "xe_pm.h"
#include "xe_sriov.h"
@@ -62,6 +64,69 @@ static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
xe_gt_sriov_pf_control_trigger_flr(gt, n);
}
+static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ /* caller must use pci_dev_put() */
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ pci_iov_virtfn_devfn(pdev, vf_id));
+}
+
+static void pf_link_vfs(struct xe_device *xe, int num_vfs)
+{
+ struct pci_dev *pdev_pf = to_pci_dev(xe->drm.dev);
+ struct device_link *link;
+ struct pci_dev *pdev_vf;
+ unsigned int n;
+
+ /*
+ * When both PF and VF devices are enabled on the host, they resume in
+ * parallel during system resume.
+ *
+ * But the PF has to finish provisioning the VFs first to allow any VF to
+ * resume successfully.
+ *
+ * Create a parent-child device link between the PF and VF devices that
+ * will enforce the correct resume order.
+ */
+ for (n = 1; n <= num_vfs; n++) {
+ pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);
+
+ /* unlikely, something weird is happening, abort */
+ if (!pdev_vf) {
+ xe_sriov_err(xe, "Cannot find VF%u device, aborting link%s creation!\n",
+ n, str_plural(num_vfs));
+ break;
+ }
+
+ link = device_link_add(&pdev_vf->dev, &pdev_pf->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER);
+ /* unlikely and harmless, continue with other VFs */
+ if (!link)
+ xe_sriov_notice(xe, "Failed linking VF%u\n", n);
+
+ pci_dev_put(pdev_vf);
+ }
+}
+
+static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, bool enable)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_guc_engine_activity_function_stats(&gt->uc.guc, num_vfs, enable);
+ if (ret)
+ xe_gt_sriov_info(gt, "Failed to %s engine activity function stats (%pe)\n",
+ str_enable_disable(enable), ERR_PTR(ret));
+ }
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -92,8 +157,13 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err < 0)
goto failed;
+ pf_link_vfs(xe, num_vfs);
+
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
+
+ pf_engine_activity_stats(xe, num_vfs, true);
+
return num_vfs;
failed:
@@ -117,6 +187,8 @@ static int pf_disable_vfs(struct xe_device *xe)
if (!num_vfs)
return 0;
+ pf_engine_activity_stats(xe, num_vfs, false);
+
pci_disable_sriov(pdev);
pf_reset_vfs(xe, num_vfs);
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 79b0f80376a4..ca6b10d35573 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -9,42 +9,31 @@
#include <linux/types.h>
struct xe_graphics_desc {
- const char *name;
- u8 ver;
- u8 rel;
-
- u8 dma_mask_size; /* available DMA address bits */
u8 va_bits;
u8 vm_max_level;
u8 vram_flags;
u64 hw_engine_mask; /* hardware engines provided by graphics IP */
- u32 tile_mmio_ext_size; /* size of MMIO extension space, per-tile */
-
- u8 max_remote_tiles:2;
-
u8 has_asid:1;
u8 has_atomic_enable_pte_bit:1;
u8 has_flat_ccs:1;
u8 has_indirect_ring_state:1;
u8 has_range_tlb_invalidation:1;
u8 has_usm:1;
+ u8 has_64bit_timestamp:1;
};
struct xe_media_desc {
- const char *name;
- u8 ver;
- u8 rel;
-
u64 hw_engine_mask; /* hardware engines provided by media IP */
u8 has_indirect_ring_state:1;
};
-struct gmdid_map {
- unsigned int ver;
- const void *ip;
+struct xe_ip {
+ unsigned int verx100;
+ const char *name;
+ const void *desc;
};
#endif
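For orientation, a descriptor table entry under the new xe_ip scheme would look roughly like the sketch below. The version number, codename and field values are illustrative assumptions rather than entries taken from this patch; the real graphics_ips[]/media_ips[] tables that handle_gmdid() walks live in xe_pci.c.

/* Hypothetical entries, to show how handle_gmdid() matches the GMD_ID
 * register value against xe_ip.verx100 and then picks name and descriptor.
 */
static const struct xe_graphics_desc example_graphics_desc = {
	.va_bits = 48,
	.vm_max_level = 4,
	.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
	.has_flat_ccs = 1,
};

static const struct xe_ip graphics_ips[] = {
	{ 2004, "Xe2_LPG", &example_graphics_desc },	/* graphics version 20.04 */
};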
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 7397d556996a..9189117fe825 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/error-injection.h>
#include <drm/drm_managed.h>
@@ -44,7 +45,7 @@ static int pcode_mailbox_status(struct xe_tile *tile)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
- err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
+ err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
err_decode[err].str ?: "Unknown");
@@ -58,7 +59,7 @@ static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *d
unsigned int timeout_ms, bool return_data,
bool atomic)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
int err;
if (tile_to_xe(tile)->info.skip_pcode)
@@ -108,6 +109,17 @@ int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout
return err;
}
+int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0, u32 data1, int timeout)
+{
+ int err;
+
+ mutex_lock(&tile->pcode.lock);
+ err = pcode_mailbox_rw(tile, mbox, &data0, &data1, timeout, false, false);
+ mutex_unlock(&tile->pcode.lock);
+
+ return err;
+}
+
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
int err;
@@ -217,7 +229,7 @@ out:
*
* It returns 0 on success, and -ERROR number on failure, -EINVAL if max
* frequency is higher then the minimal, and other errors directly translated
- * from the PCODE Error returs:
+ * from the PCODE Error returns:
* - -ENXIO: "Illegal Command"
* - -ETIMEDOUT: "Timed out"
* - -EINVAL: "Illegal Data"
@@ -323,3 +335,4 @@ int xe_pcode_probe_early(struct xe_device *xe)
{
return xe_pcode_ready(xe, false);
}
+ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index ba33991d72a7..de38f44f3201 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -18,6 +18,9 @@ int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1);
int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 val,
int timeout_ms);
+int xe_pcode_write64_timeout(struct xe_tile *tile, u32 mbox, u32 data0,
+ u32 data1, int timeout);
+
#define xe_pcode_write(tile, mbox, val) \
xe_pcode_write_timeout(tile, mbox, val, 1)
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index f153ce96f69a..0befdea77db1 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -34,6 +34,7 @@
#define DGFX_PCODE_STATUS 0x7E
#define DGFX_GET_INIT_STATUS 0x0
#define DGFX_INIT_STATUS_COMPLETE 0x1
+#define DGFX_LINK_DOWNGRADE_STATUS REG_BIT(31)
#define PCODE_POWER_SETUP 0x7C
#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
@@ -42,6 +43,13 @@
#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define READ_PSYSGPU_POWER_LIMIT 0x6
+#define WRITE_PSYSGPU_POWER_LIMIT 0x7
+#define READ_PACKAGE_POWER_LIMIT 0x8
+#define WRITE_PACKAGE_POWER_LIMIT 0x9
+#define READ_PL_FROM_FW 0x1
+#define READ_PL_FROM_PCODE 0x0
+
#define PCODE_FREQUENCY_CONFIG 0x6e
/* Frequency Config Sub Commands (param1) */
#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
@@ -49,6 +57,27 @@
/* Domain IDs (param2) */
#define PCODE_MBOX_DOMAIN_HBM 0x2
+#define FAN_SPEED_CONTROL 0x7D
+#define FSC_READ_NUM_FANS 0x4
+
+#define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4))
+/* PCODE_SCRATCH0 */
+#define AUXINFO_REG_OFFSET REG_GENMASK(17, 15)
+#define OVERFLOW_REG_OFFSET REG_GENMASK(14, 12)
+#define HISTORY_TRACKING REG_BIT(11)
+#define OVERFLOW_SUPPORT REG_BIT(10)
+#define AUXINFO_SUPPORT REG_BIT(9)
+#define BOOT_STATUS REG_GENMASK(3, 1)
+#define CRITICAL_FAILURE 4
+#define NON_CRITICAL_FAILURE 7
+
+/* Auxiliary info bits */
+#define AUXINFO_HISTORY_OFFSET REG_GENMASK(31, 29)
+
+#define BMG_PCIE_CAP XE_REG(0x138340)
+#define LINK_DOWNGRADE REG_GENMASK(1, 0)
+#define DOWNGRADE_CAPABLE 2
+
struct pcode_err_decode {
int errno;
const char *str;
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
index 79b7042c4534..d08574c4cdb8 100644
--- a/drivers/gpu/drm/xe/xe_platform_types.h
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -23,6 +23,7 @@ enum xe_platform {
XE_METEORLAKE,
XE_LUNARLAKE,
XE_BATTLEMAGE,
+ XE_PANTHERLAKE,
};
enum xe_subplatform {
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 7cf2160fe040..ff749edc005b 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -5,7 +5,9 @@
#include "xe_pm.h"
+#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_placement.h>
@@ -14,12 +16,12 @@
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_device.h"
-#include "xe_device_sysfs.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_irq.h"
#include "xe_pcode.h"
+#include "xe_pxp.h"
#include "xe_trace.h"
#include "xe_wa.h"
@@ -88,7 +90,7 @@ static struct lockdep_map xe_pm_runtime_nod3cold_map = {
*/
bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
- return !xe->d3cold.capable && !xe->info.has_sriov;
+ return !xe->d3cold.capable;
}
static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
@@ -120,22 +122,24 @@ int xe_pm_suspend(struct xe_device *xe)
drm_dbg(&xe->drm, "Suspending device\n");
trace_xe_pm_suspend(xe, __builtin_return_address(0));
+ err = xe_pxp_pm_suspend(xe->pxp);
+ if (err)
+ goto err;
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
- xe_display_pm_suspend(xe, false);
+ xe_display_pm_suspend(xe);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
if (err)
- goto err;
+ goto err_pxp;
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
- if (err) {
- xe_display_pm_resume(xe, false);
- goto err;
- }
+ if (err)
+ goto err_display;
}
xe_irq_suspend(xe);
@@ -144,6 +148,11 @@ int xe_pm_suspend(struct xe_device *xe)
drm_dbg(&xe->drm, "Device suspended\n");
return 0;
+
+err_display:
+ xe_display_pm_resume(xe);
+err_pxp:
+ xe_pxp_pm_resume(xe->pxp);
err:
drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
return err;
@@ -178,7 +187,7 @@ int xe_pm_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory required for the
* GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto err;
@@ -187,12 +196,14 @@ int xe_pm_resume(struct xe_device *xe)
for_each_gt(gt, xe, id)
xe_gt_resume(gt);
- xe_display_pm_resume(xe, false);
+ xe_display_pm_resume(xe);
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto err;
+ xe_pxp_pm_resume(xe->pxp);
+
drm_dbg(&xe->drm, "Device resumed\n");
return 0;
err:
@@ -261,8 +272,55 @@ int xe_pm_init_early(struct xe_device *xe)
if (err)
return err;
+ xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
return 0;
}
+ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */
+
+static u32 vram_threshold_value(struct xe_device *xe)
+{
+ /* FIXME: D3Cold temporarily disabled by default on BMG */
+ if (xe->info.platform == XE_BATTLEMAGE)
+ return 0;
+
+ return DEFAULT_VRAM_THRESHOLD;
+}
+
+static int xe_pm_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct xe_device *xe = container_of(nb, struct xe_device, pm_notifier);
+ int err = 0;
+
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ xe_pm_runtime_get(xe);
+ err = xe_bo_evict_all_user(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ err = xe_bo_notifier_prepare_all_pinned(xe);
+ if (err) {
+ drm_dbg(&xe->drm, "Notifier prepare pin failed (%d)\n", err);
+ xe_pm_runtime_put(xe);
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ xe_bo_notifier_unprepare_all_pinned(xe);
+ xe_pm_runtime_put(xe);
+ break;
+ }
+
+ if (err)
+ return NOTIFY_BAD;
+
+ return NOTIFY_DONE;
+}
/**
* xe_pm_init - Initialize Xe Power Management
@@ -274,34 +332,34 @@ int xe_pm_init_early(struct xe_device *xe)
*/
int xe_pm_init(struct xe_device *xe)
{
+ u32 vram_threshold;
int err;
+ xe->pm_notifier.notifier_call = xe_pm_notifier_callback;
+ err = register_pm_notifier(&xe->pm_notifier);
+ if (err)
+ return err;
+
/* For now suspend/resume is only allowed with GuC */
if (!xe_device_uc_enabled(xe))
return 0;
- xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe);
-
if (xe->d3cold.capable) {
- err = xe_device_sysfs_init(xe);
- if (err)
- return err;
-
- err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD);
+ vram_threshold = vram_threshold_value(xe);
+ err = xe_pm_set_vram_threshold(xe, vram_threshold);
if (err)
- return err;
+ goto err_unregister;
}
xe_pm_runtime_init(xe);
-
return 0;
+
+err_unregister:
+ unregister_pm_notifier(&xe->pm_notifier);
+ return err;
}
-/**
- * xe_pm_runtime_fini - Finalize Runtime PM
- * @xe: xe device instance
- */
-void xe_pm_runtime_fini(struct xe_device *xe)
+static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -309,6 +367,18 @@ void xe_pm_runtime_fini(struct xe_device *xe)
pm_runtime_forbid(dev);
}
+/**
+ * xe_pm_fini - Finalize PM
+ * @xe: xe device instance
+ */
+void xe_pm_fini(struct xe_device *xe)
+{
+ if (xe_device_uc_enabled(xe))
+ xe_pm_runtime_fini(xe);
+
+ unregister_pm_notifier(&xe->pm_notifier);
+}
+
static void xe_pm_write_callback_task(struct xe_device *xe,
struct task_struct *task)
{
@@ -386,9 +456,13 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
*/
xe_rpm_lockmap_acquire(xe);
+ err = xe_pxp_pm_suspend(xe->pxp);
+ if (err)
+ goto out;
+
/*
* Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
- * also checks and delets bo entry from user fault list.
+ * also checks and deletes bo entry from user fault list.
*/
mutex_lock(&xe->mem_access.vram_userfault.lock);
list_for_each_entry_safe(bo, on,
@@ -401,22 +475,27 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
if (xe->d3cold.allowed) {
err = xe_bo_evict_all(xe);
if (err)
- goto out;
+ goto out_resume;
}
for_each_gt(gt, xe, id) {
err = xe_gt_suspend(gt);
if (err)
- goto out;
+ goto out_resume;
}
xe_irq_suspend(xe);
- if (xe->d3cold.allowed)
- xe_display_pm_suspend_late(xe);
+ xe_display_pm_runtime_suspend_late(xe);
+
+ xe_rpm_lockmap_release(xe);
+ xe_pm_write_callback_task(xe, NULL);
+ return 0;
+
+out_resume:
+ xe_display_pm_runtime_resume(xe);
+ xe_pxp_pm_resume(xe->pxp);
out:
- if (err)
- xe_display_pm_runtime_resume(xe);
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
return err;
@@ -451,7 +530,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
* This only restores pinned memory which is the memory
* required for the GT(s) to resume.
*/
- err = xe_bo_restore_kernel(xe);
+ err = xe_bo_restore_early(xe);
if (err)
goto out;
}
@@ -464,11 +543,13 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_display_pm_runtime_resume(xe);
if (xe->d3cold.allowed) {
- err = xe_bo_restore_user(xe);
+ err = xe_bo_restore_late(xe);
if (err)
goto out;
}
+ xe_pxp_pm_resume(xe->pxp);
+
out:
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
@@ -605,7 +686,8 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
struct device *dev = xe->drm.dev;
return dev->power.runtime_status == RPM_SUSPENDING ||
- dev->power.runtime_status == RPM_RESUMING;
+ dev->power.runtime_status == RPM_RESUMING ||
+ pm_suspend_in_progress();
#else
return false;
#endif
@@ -736,9 +818,6 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
xe->d3cold.allowed = false;
mutex_unlock(&xe->d3cold.lock);
-
- drm_dbg(&xe->drm,
- "d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
}
/**
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 998d1ed64556..59678b310e55 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -17,7 +17,7 @@ int xe_pm_resume(struct xe_device *xe);
int xe_pm_init_early(struct xe_device *xe);
int xe_pm_init(struct xe_device *xe);
-void xe_pm_runtime_fini(struct xe_device *xe);
+void xe_pm_fini(struct xe_device *xe);
bool xe_pm_runtime_suspended(struct xe_device *xe);
int xe_pm_runtime_suspend(struct xe_device *xe);
int xe_pm_runtime_resume(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
new file mode 100644
index 000000000000..69df0e3520a5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+#include <linux/device.h>
+
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gt_idle.h"
+#include "xe_guc_engine_activity.h"
+#include "xe_guc_pc.h"
+#include "xe_hw_engine.h"
+#include "xe_pm.h"
+#include "xe_pmu.h"
+#include "xe_sriov_pf_helpers.h"
+
+/**
+ * DOC: Xe PMU (Performance Monitoring Unit)
+ *
+ * Expose events/counters like GT-C6 residency, GT frequency and per-class-engine
+ * activity to userspace via the perf interface. Events are per device.
+ *
+ * All events are listed in sysfs:
+ *
+ * $ ls -ld /sys/bus/event_source/devices/xe_*
+ * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/events/
+ * $ ls /sys/bus/event_source/devices/xe_0000_00_02.0/format/
+ *
+ * The following format parameters are available to read events,
+ * but only a few are valid for each event:
+ *
+ * gt[60:63] Selects gt for the event
+ * engine_class[20:27] Selects engine-class for event
+ * engine_instance[12:19] Selects the engine-instance for the event
+ * function[44:59] Selects the function of the event (SRIOV enabled)
+ *
+ * For engine-specific events (engine-*), the gt, engine_class and engine_instance parameters
+ * must be set as populated by DRM_XE_DEVICE_QUERY_ENGINES, plus function if SRIOV is enabled.
+ *
+ * For gt-specific events (gt-*), the gt parameter must be passed. All other parameters will be 0.
+ *
+ * The standard perf tool can be used to grep for a certain event as well.
+ * Example:
+ *
+ * $ perf list | grep gt-c6
+ *
+ * To sample a specific event for a GT at regular intervals:
+ *
+ * $ perf stat -e <event_name,gt=> -I <interval>
+ */
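To make the config layout above concrete, a minimal userspace sketch (not part of this patch) that packs the format fields into attr.config and opens the engine-active-ticks event (event id 0x02, defined below) could look as follows. The PMU type value is assumed to have been read from /sys/bus/event_source/devices/xe_<bdf>/type beforehand, and error handling is omitted for brevity.

#include <linux/perf_event.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Pack the format fields documented above into a raw event config. */
static uint64_t xe_pmu_config(uint64_t event_id, uint64_t gt,
			      uint64_t engine_class, uint64_t engine_instance)
{
	return (gt << 60) | (engine_class << 20) |
	       (engine_instance << 12) | event_id;
}

static int open_engine_active_ticks(int xe_pmu_type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = xe_pmu_type;
	/* engine-active-ticks (0x02) on gt 0, render class 0, instance 0 */
	attr.config = xe_pmu_config(0x02, 0, 0, 0);

	/* System-wide PMU: pid = -1, a valid cpu, no group fd, no flags. */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}

With the standard tool, the equivalent would be something like:

 $ perf stat -e xe_0000_00_02.0/engine-active-ticks,gt=0,engine_class=0,engine_instance=0/ -I 1000

using the sysfs PMU name and event names listed above.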
+
+#define XE_PMU_EVENT_GT_MASK GENMASK_ULL(63, 60)
+#define XE_PMU_EVENT_FUNCTION_MASK GENMASK_ULL(59, 44)
+#define XE_PMU_EVENT_ENGINE_CLASS_MASK GENMASK_ULL(27, 20)
+#define XE_PMU_EVENT_ENGINE_INSTANCE_MASK GENMASK_ULL(19, 12)
+#define XE_PMU_EVENT_ID_MASK GENMASK_ULL(11, 0)
+
+static unsigned int config_to_event_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ID_MASK, config);
+}
+
+static unsigned int config_to_function_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_FUNCTION_MASK, config);
+}
+
+static unsigned int config_to_engine_class(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ENGINE_CLASS_MASK, config);
+}
+
+static unsigned int config_to_engine_instance(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_ENGINE_INSTANCE_MASK, config);
+}
+
+static unsigned int config_to_gt_id(u64 config)
+{
+ return FIELD_GET(XE_PMU_EVENT_GT_MASK, config);
+}
+
+#define XE_PMU_EVENT_GT_C6_RESIDENCY 0x01
+#define XE_PMU_EVENT_ENGINE_ACTIVE_TICKS 0x02
+#define XE_PMU_EVENT_ENGINE_TOTAL_TICKS 0x03
+#define XE_PMU_EVENT_GT_ACTUAL_FREQUENCY 0x04
+#define XE_PMU_EVENT_GT_REQUESTED_FREQUENCY 0x05
+
+static struct xe_gt *event_to_gt(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ u64 gt = config_to_gt_id(event->attr.config);
+
+ return xe_device_get_gt(xe, gt);
+}
+
+static struct xe_hw_engine *event_to_hwe(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct drm_xe_engine_class_instance eci;
+ u64 config = event->attr.config;
+ struct xe_hw_engine *hwe;
+
+ eci.engine_class = config_to_engine_class(config);
+ eci.engine_instance = config_to_engine_instance(config);
+ eci.gt_id = config_to_gt_id(config);
+
+ hwe = xe_hw_engine_lookup(xe, eci);
+ if (!hwe || xe_hw_engine_is_reserved(hwe))
+ return NULL;
+
+ return hwe;
+}
+
+static bool is_engine_event(u64 config)
+{
+ unsigned int event_id = config_to_event_id(config);
+
+ return (event_id == XE_PMU_EVENT_ENGINE_TOTAL_TICKS ||
+ event_id == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+}
+
+static bool is_gt_frequency_event(struct perf_event *event)
+{
+ u32 id = config_to_event_id(event->attr.config);
+
+ return id == XE_PMU_EVENT_GT_ACTUAL_FREQUENCY ||
+ id == XE_PMU_EVENT_GT_REQUESTED_FREQUENCY;
+}
+
+static bool event_gt_forcewake(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ u64 config = event->attr.config;
+ struct xe_gt *gt;
+ unsigned int *fw_ref;
+
+ if (!is_engine_event(config) && !is_gt_frequency_event(event))
+ return true;
+
+ gt = xe_device_get_gt(xe, config_to_gt_id(config));
+
+ fw_ref = kzalloc(sizeof(*fw_ref), GFP_KERNEL);
+ if (!fw_ref)
+ return false;
+
+ *fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!*fw_ref) {
+ kfree(fw_ref);
+ return false;
+ }
+
+ event->pmu_private = fw_ref;
+
+ return true;
+}
+
+static bool event_supported(struct xe_pmu *pmu, unsigned int gt,
+ unsigned int id)
+{
+ if (gt >= XE_MAX_GT_PER_TILE)
+ return false;
+
+ return id < sizeof(pmu->supported_events) * BITS_PER_BYTE &&
+ pmu->supported_events & BIT_ULL(id);
+}
+
+static bool event_param_valid(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ unsigned int engine_class, engine_instance, function_id;
+ u64 config = event->attr.config;
+ struct xe_gt *gt;
+
+ gt = xe_device_get_gt(xe, config_to_gt_id(config));
+ if (!gt)
+ return false;
+
+ engine_class = config_to_engine_class(config);
+ engine_instance = config_to_engine_instance(config);
+ function_id = config_to_function_id(config);
+
+ switch (config_to_event_id(config)) {
+ case XE_PMU_EVENT_GT_C6_RESIDENCY:
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ if (engine_class || engine_instance || function_id)
+ return false;
+ break;
+ case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
+ case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
+ if (!event_to_hwe(event))
+ return false;
+
+ /* Valid functions are PF(0) and VF1..totalvfs when SRIOV is enabled */
+ if (IS_SRIOV_PF(xe)) {
+ if (function_id > xe_sriov_pf_get_totalvfs(xe))
+ return false;
+ } else if (function_id) {
+ return false;
+ }
+
+ break;
+ }
+
+ return true;
+}
+
+static void xe_pmu_event_destroy(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_gt *gt;
+ unsigned int *fw_ref = event->pmu_private;
+
+ if (fw_ref) {
+ gt = xe_device_get_gt(xe, config_to_gt_id(event->attr.config));
+ xe_force_wake_put(gt_to_fw(gt), *fw_ref);
+ kfree(fw_ref);
+ event->pmu_private = NULL;
+ }
+
+ drm_WARN_ON(&xe->drm, event->parent);
+ xe_pm_runtime_put(xe);
+ drm_dev_put(&xe->drm);
+}
+
+static int xe_pmu_event_init(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+ unsigned int id, gt;
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* unsupported modes and filters */
+ if (event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ gt = config_to_gt_id(event->attr.config);
+ id = config_to_event_id(event->attr.config);
+ if (!event_supported(pmu, gt, id))
+ return -ENOENT;
+
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ if (!event_param_valid(event))
+ return -ENOENT;
+
+ if (!event->parent) {
+ drm_dev_get(&xe->drm);
+ xe_pm_runtime_get(xe);
+ if (!event_gt_forcewake(event)) {
+ xe_pm_runtime_put(xe);
+ drm_dev_put(&xe->drm);
+ return -EINVAL;
+ }
+ event->destroy = xe_pmu_event_destroy;
+ }
+
+ return 0;
+}
+
+static u64 read_engine_events(struct xe_gt *gt, struct perf_event *event)
+{
+ struct xe_hw_engine *hwe;
+ unsigned int function_id;
+ u64 config, val = 0;
+
+ config = event->attr.config;
+ function_id = config_to_function_id(config);
+
+ hwe = event_to_hwe(event);
+ if (config_to_event_id(config) == XE_PMU_EVENT_ENGINE_ACTIVE_TICKS)
+ val = xe_guc_engine_activity_active_ticks(&gt->uc.guc, hwe, function_id);
+ else
+ val = xe_guc_engine_activity_total_ticks(&gt->uc.guc, hwe, function_id);
+
+ return val;
+}
+
+static u64 __xe_pmu_event_read(struct perf_event *event)
+{
+ struct xe_gt *gt = event_to_gt(event);
+
+ if (!gt)
+ return 0;
+
+ switch (config_to_event_id(event->attr.config)) {
+ case XE_PMU_EVENT_GT_C6_RESIDENCY:
+ return xe_gt_idle_residency_msec(&gt->gtidle);
+ case XE_PMU_EVENT_ENGINE_ACTIVE_TICKS:
+ case XE_PMU_EVENT_ENGINE_TOTAL_TICKS:
+ return read_engine_events(gt, event);
+ case XE_PMU_EVENT_GT_ACTUAL_FREQUENCY:
+ return xe_guc_pc_get_act_freq(&gt->uc.guc.pc);
+ case XE_PMU_EVENT_GT_REQUESTED_FREQUENCY:
+ return xe_guc_pc_get_cur_freq_fw(&gt->uc.guc.pc);
+ }
+
+ return 0;
+}
+
+static void xe_pmu_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev, new;
+
+ prev = local64_read(&hwc->prev_count);
+ do {
+ new = __xe_pmu_event_read(event);
+ } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, new));
+
+ /*
+ * GT frequency is not a monotonically increasing counter, so add the
+ * instantaneous value instead.
+ */
+ if (is_gt_frequency_event(event))
+ local64_add(new, &event->count);
+ else
+ local64_add(new - prev, &event->count);
+}
+
+static void xe_pmu_event_read(struct perf_event *event)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (!pmu->registered) {
+ event->hw.state = PERF_HES_STOPPED;
+ return;
+ }
+
+ xe_pmu_event_update(event);
+}
+
+static void xe_pmu_enable(struct perf_event *event)
+{
+ /*
+ * Store the current counter value so we can report the correct delta
+ * for all listeners, even when the event was already enabled and has
+ * an existing non-zero value.
+ */
+ local64_set(&event->hw.prev_count, __xe_pmu_event_read(event));
+}
+
+static void xe_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (!pmu->registered)
+ return;
+
+ xe_pmu_enable(event);
+ event->hw.state = 0;
+}
+
+static void xe_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (pmu->registered)
+ if (flags & PERF_EF_UPDATE)
+ xe_pmu_event_update(event);
+
+ event->hw.state = PERF_HES_STOPPED;
+}
+
+static int xe_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct xe_device *xe = container_of(event->pmu, typeof(*xe), pmu.base);
+ struct xe_pmu *pmu = &xe->pmu;
+
+ if (!pmu->registered)
+ return -ENODEV;
+
+ if (flags & PERF_EF_START)
+ xe_pmu_event_start(event, flags);
+
+ return 0;
+}
+
+static void xe_pmu_event_del(struct perf_event *event, int flags)
+{
+ xe_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+PMU_FORMAT_ATTR(gt, "config:60-63");
+PMU_FORMAT_ATTR(function, "config:44-59");
+PMU_FORMAT_ATTR(engine_class, "config:20-27");
+PMU_FORMAT_ATTR(engine_instance, "config:12-19");
+PMU_FORMAT_ATTR(event, "config:0-11");
+
+static struct attribute *pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ &format_attr_engine_class.attr,
+ &format_attr_engine_instance.attr,
+ &format_attr_function.attr,
+ &format_attr_gt.attr,
+ NULL,
+};
+
+static const struct attribute_group pmu_format_attr_group = {
+ .name = "format",
+ .attrs = pmu_format_attrs,
+};
+
+static ssize_t event_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct perf_pmu_events_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ return sprintf(buf, "event=%#04llx\n", pmu_attr->id);
+}
+
+#define XE_EVENT_ATTR(name_, v_, id_) \
+ PMU_EVENT_ATTR(name_, pmu_event_ ## v_, id_, event_attr_show)
+
+#define XE_EVENT_ATTR_UNIT(name_, v_, unit_) \
+ PMU_EVENT_ATTR_STRING(name_.unit, pmu_event_unit_ ## v_, unit_)
+
+#define XE_EVENT_ATTR_GROUP(v_, id_, ...) \
+ static struct attribute *pmu_attr_ ##v_[] = { \
+ __VA_ARGS__, \
+ NULL \
+ }; \
+ static umode_t is_visible_##v_(struct kobject *kobj, \
+ struct attribute *attr, int idx) \
+ { \
+ struct perf_pmu_events_attr *pmu_attr; \
+ struct xe_pmu *pmu; \
+ \
+ pmu_attr = container_of(attr, typeof(*pmu_attr), attr.attr); \
+ pmu = container_of(dev_get_drvdata(kobj_to_dev(kobj)), \
+ typeof(*pmu), base); \
+ \
+ return event_supported(pmu, 0, id_) ? attr->mode : 0; \
+ } \
+ static const struct attribute_group pmu_group_ ##v_ = { \
+ .name = "events", \
+ .attrs = pmu_attr_ ## v_, \
+ .is_visible = is_visible_ ## v_, \
+ }
+
+#define XE_EVENT_ATTR_SIMPLE(name_, v_, id_, unit_) \
+ XE_EVENT_ATTR(name_, v_, id_) \
+ XE_EVENT_ATTR_UNIT(name_, v_, unit_) \
+ XE_EVENT_ATTR_GROUP(v_, id_, &pmu_event_ ##v_.attr.attr, \
+ &pmu_event_unit_ ##v_.attr.attr)
+
+#define XE_EVENT_ATTR_NOUNIT(name_, v_, id_) \
+ XE_EVENT_ATTR(name_, v_, id_) \
+ XE_EVENT_ATTR_GROUP(v_, id_, &pmu_event_ ##v_.attr.attr)
+
+XE_EVENT_ATTR_SIMPLE(gt-c6-residency, gt_c6_residency, XE_PMU_EVENT_GT_C6_RESIDENCY, "ms");
+XE_EVENT_ATTR_NOUNIT(engine-active-ticks, engine_active_ticks, XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+XE_EVENT_ATTR_NOUNIT(engine-total-ticks, engine_total_ticks, XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+XE_EVENT_ATTR_SIMPLE(gt-actual-frequency, gt_actual_frequency,
+ XE_PMU_EVENT_GT_ACTUAL_FREQUENCY, "MHz");
+XE_EVENT_ATTR_SIMPLE(gt-requested-frequency, gt_requested_frequency,
+ XE_PMU_EVENT_GT_REQUESTED_FREQUENCY, "MHz");
+
+static struct attribute *pmu_empty_event_attrs[] = {
+ /* Empty - all events are added as groups with .attr_update() */
+ NULL,
+};
+
+static const struct attribute_group pmu_events_attr_group = {
+ .name = "events",
+ .attrs = pmu_empty_event_attrs,
+};
+
+static const struct attribute_group *pmu_events_attr_update[] = {
+ &pmu_group_gt_c6_residency,
+ &pmu_group_engine_active_ticks,
+ &pmu_group_engine_total_ticks,
+ &pmu_group_gt_actual_frequency,
+ &pmu_group_gt_requested_frequency,
+ NULL,
+};
+
+static void set_supported_events(struct xe_pmu *pmu)
+{
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ struct xe_gt *gt = xe_device_get_gt(xe, 0);
+
+ if (!xe->info.skip_guc_pc) {
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_ACTUAL_FREQUENCY);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY);
+ }
+
+ if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
+ pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
+ }
+}
+
+/**
+ * xe_pmu_unregister() - Remove/cleanup PMU registration
+ * @arg: Ptr to pmu
+ */
+static void xe_pmu_unregister(void *arg)
+{
+ struct xe_pmu *pmu = arg;
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+
+ if (!pmu->registered)
+ return;
+
+ pmu->registered = false;
+
+ perf_pmu_unregister(&pmu->base);
+ kfree(pmu->name);
+}
+
+/**
+ * xe_pmu_register() - Define basic PMU properties for Xe and add event callbacks.
+ * @pmu: the PMU object
+ *
+ * Returns 0 on success and an appropriate error code otherwise
+ */
+int xe_pmu_register(struct xe_pmu *pmu)
+{
+ struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
+ static const struct attribute_group *attr_groups[] = {
+ &pmu_format_attr_group,
+ &pmu_events_attr_group,
+ NULL
+ };
+ int ret = -ENOMEM;
+ char *name;
+
+ BUILD_BUG_ON(XE_MAX_GT_PER_TILE != XE_PMU_MAX_GT);
+
+ if (IS_SRIOV_VF(xe))
+ return 0;
+
+ name = kasprintf(GFP_KERNEL, "xe_%s",
+ dev_name(xe->drm.dev));
+ if (!name)
+ goto err;
+
+ /* tools/perf reserves colons as special. */
+ strreplace(name, ':', '_');
+
+ pmu->name = name;
+ pmu->base.attr_groups = attr_groups;
+ pmu->base.attr_update = pmu_events_attr_update;
+ pmu->base.scope = PERF_PMU_SCOPE_SYS_WIDE;
+ pmu->base.module = THIS_MODULE;
+ pmu->base.task_ctx_nr = perf_invalid_context;
+ pmu->base.event_init = xe_pmu_event_init;
+ pmu->base.add = xe_pmu_event_add;
+ pmu->base.del = xe_pmu_event_del;
+ pmu->base.start = xe_pmu_event_start;
+ pmu->base.stop = xe_pmu_event_stop;
+ pmu->base.read = xe_pmu_event_read;
+
+ set_supported_events(pmu);
+
+ ret = perf_pmu_register(&pmu->base, pmu->name, -1);
+ if (ret)
+ goto err_name;
+
+ pmu->registered = true;
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_pmu_unregister, pmu);
+
+err_name:
+ kfree(name);
+err:
+ drm_err(&xe->drm, "Failed to register PMU (ret=%d)!\n", ret);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_pmu.h b/drivers/gpu/drm/xe/xe_pmu.h
new file mode 100644
index 000000000000..60c37126f87e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pmu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PMU_H_
+#define _XE_PMU_H_
+
+#include "xe_pmu_types.h"
+
+#if IS_ENABLED(CONFIG_PERF_EVENTS)
+int xe_pmu_register(struct xe_pmu *pmu);
+#else
+static inline int xe_pmu_register(struct xe_pmu *pmu) { return 0; }
+#endif
+
+#endif
+
diff --git a/drivers/gpu/drm/xe/xe_pmu_types.h b/drivers/gpu/drm/xe/xe_pmu_types.h
new file mode 100644
index 000000000000..f5ba4d56622c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pmu_types.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PMU_TYPES_H_
+#define _XE_PMU_TYPES_H_
+
+#include <linux/perf_event.h>
+#include <linux/spinlock_types.h>
+
+#define XE_PMU_MAX_GT 2
+
+/**
+ * struct xe_pmu - PMU related data per Xe device
+ *
+ * Stores per device PMU info that includes event/perf attributes and sampling
+ * counters across all GTs for this device.
+ */
+struct xe_pmu {
+ /**
+ * @base: PMU base.
+ */
+ struct pmu base;
+ /**
+ * @registered: PMU is registered and not in the unregistering process.
+ */
+ bool registered;
+ /**
+ * @name: Name as registered with perf core.
+ */
+ const char *name;
+ /**
+ * @supported_events: Bitmap of supported events, indexed by event id
+ */
+ u64 supported_events;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index f27f579f4d85..b04756a97cdc 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -20,6 +20,7 @@
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
+#include "xe_svm.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
@@ -28,6 +29,8 @@ struct xe_pt_dir {
struct xe_pt pt;
/** @children: Array of page-table child nodes */
struct xe_ptw *children[XE_PDES];
+ /** @staging: Array of page-table staging nodes */
+ struct xe_ptw *staging[XE_PDES];
};
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -48,9 +51,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
return container_of(pt, struct xe_pt_dir, pt);
}
-static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
+static struct xe_pt *
+xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->children[index], struct xe_pt, base);
+ return container_of(pt_dir->staging[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -99,6 +103,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
{
struct xe_pt *pt;
struct xe_bo *bo;
+ u32 bo_flags;
int err;
if (level) {
@@ -111,20 +116,23 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
if (!pt)
return ERR_PTR(-ENOMEM);
+ bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
+ XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
+ if (vm->xef) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+
pt->level = level;
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_NO_RESV_EVICT |
- XE_BO_FLAG_PAGETABLE);
+ bo_flags);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
}
pt->bo = bo;
pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
+ pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -136,6 +144,7 @@ err_kfree:
xe_pt_free(pt);
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_create, ERRNO);
/**
* xe_pt_populate_empty() - Populate a page-table bo with scratch- or zero
@@ -205,8 +214,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
for (i = 0; i < XE_PDES; i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
+ if (xe_pt_entry_staging(pt_dir, i))
+ xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
deferred);
}
}
@@ -214,6 +223,20 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
}
/**
+ * xe_pt_clear() - Clear a page-table.
+ * @xe: xe device.
+ * @pt: The page-table.
+ *
+ * Clears the page-table by setting all of its entries to zero.
+ */
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt)
+{
+ struct iosys_map *map = &pt->bo->vmap;
+
+ xe_map_memset(xe, map, 0, 0, SZ_4K);
+}
+
+/**
* DOC: Pagetable building
*
* Below we use the term "page-table" for both page-directories, containing
@@ -249,8 +272,11 @@ struct xe_pt_update {
bool preexisting;
};
+/**
+ * struct xe_pt_stage_bind_walk - Walk state for the stage_bind walk.
+ */
struct xe_pt_stage_bind_walk {
- /** base: The base class. */
+ /** @base: The base class. */
struct xe_pt_walk base;
/* Input parameters for the walk */
@@ -258,15 +284,19 @@ struct xe_pt_stage_bind_walk {
struct xe_vm *vm;
/** @tile: The tile we're building for. */
struct xe_tile *tile;
- /** @default_pte: PTE flag only template. No address is associated */
- u64 default_pte;
+ /** @default_vram_pte: PTE flag only template for VRAM. No address is associated */
+ u64 default_vram_pte;
+ /** @default_system_pte: PTE flag only template for System. No address is associated */
+ u64 default_system_pte;
/** @dma_offset: DMA offset to add to the PTE. */
u64 dma_offset;
/**
- * @needs_64k: This address range enforces 64K alignment and
- * granularity.
+ * @needs_64K: This address range enforces 64K alignment and
+ * granularity on VRAM.
*/
bool needs_64K;
+ /** @clear_pt: clear page table entries during the bind walk */
+ bool clear_pt;
/**
* @vma: VMA being mapped
*/
@@ -275,10 +305,11 @@ struct xe_pt_stage_bind_walk {
/* Also input, but is updated during the walk*/
/** @curs: The DMA address cursor. */
struct xe_res_cursor *curs;
- /** @va_curs_start: The Virtual address coresponding to @curs->start */
+ /** @va_curs_start: The Virtual address corresponding to @curs->start */
u64 va_curs_start;
u64 va_curs_start;
/* Output */
+ /** @wupd: Walk output data for page-table updates. */
struct xe_walk_update {
/** @wupd.entries: Caller provided storage. */
struct xe_vm_pgtable_update *entries;
@@ -296,7 +327,7 @@ struct xe_pt_stage_bind_walk {
u64 l0_end_addr;
/** @addr_64K: The start address of the current 64K chunk. */
u64 addr_64K;
- /** @found_64: Whether @add_64K actually points to a 64K chunk. */
+ /** @found_64K: Whether @add_64K actually points to a 64K chunk. */
bool found_64K;
};
@@ -375,8 +406,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
/* Continue building a non-connected subtree. */
struct iosys_map *map = &parent->bo->vmap;
- if (unlikely(xe_child))
+ if (unlikely(xe_child)) {
parent->base.children[offset] = &xe_child->base;
+ parent->base.staging[offset] = &xe_child->base;
+ }
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -414,6 +447,10 @@ static bool xe_pt_hugepte_possible(u64 addr, u64 next, unsigned int level,
if (xe_vma_is_null(xe_walk->vma))
return true;
+ /* If we are clearing the page table, there are no DMA addresses */
+ if (xe_walk->clear_pt)
+ return true;
+
/* Is the DMA address huge PTE size aligned? */
size = next - addr;
dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs);
@@ -493,24 +530,35 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) {
struct xe_res_cursor *curs = xe_walk->curs;
bool is_null = xe_vma_is_null(xe_walk->vma);
+ bool is_vram = is_null ? false : xe_res_is_vram(curs);
XE_WARN_ON(xe_walk->va_curs_start != addr);
- pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
- xe_res_dma(curs) + xe_walk->dma_offset,
- xe_walk->vma, pat_index, level);
- pte |= xe_walk->default_pte;
+ if (xe_walk->clear_pt) {
+ pte = 0;
+ } else {
+ pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
+ xe_res_dma(curs) +
+ xe_walk->dma_offset,
+ xe_walk->vma,
+ pat_index, level);
+ if (!is_null)
+ pte |= is_vram ? xe_walk->default_vram_pte :
+ xe_walk->default_system_pte;
- /*
- * Set the XE_PTE_PS64 hint if possible, otherwise if
- * this device *requires* 64K PTE size for VRAM, fail.
- */
- if (level == 0 && !xe_parent->is_compact) {
- if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
- xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
- pte |= XE_PTE_PS64;
- } else if (XE_WARN_ON(xe_walk->needs_64K)) {
- return -EINVAL;
+ /*
+ * Set the XE_PTE_PS64 hint if possible, otherwise if
+ * this device *requires* 64K PTE size for VRAM, fail.
+ */
+ if (level == 0 && !xe_parent->is_compact) {
+ if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+ xe_walk->vma->gpuva.flags |=
+ XE_VMA_PTE_64K;
+ pte |= XE_PTE_PS64;
+ } else if (XE_WARN_ON(xe_walk->needs_64K &&
+ is_vram)) {
+ return -EINVAL;
+ }
}
}
@@ -518,7 +566,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (unlikely(ret))
return ret;
- if (!is_null)
+ if (!is_null && !xe_walk->clear_pt)
xe_res_next(curs, next - addr);
xe_walk->va_curs_start = next;
xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level);
@@ -581,14 +629,54 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
.pt_entry = xe_pt_stage_bind_entry,
};
+/*
+ * Default atomic expectations for different allocation scenarios are as follows:
+ *
+ * 1. Traditional API: When the VM is not in LR mode:
+ * - Device atomics are expected to function with all allocations.
+ *
+ * 2. Compute/SVM API: When the VM is in LR mode:
+ * - Device atomics are the default behavior when the bo is placed in a single region.
+ * - In all other cases device atomics will be disabled with AE=0 until an application
+ * request differently using a ioctl like madvise.
+ */
+static bool xe_atomic_for_vram(struct xe_vm *vm)
+{
+ return true;
+}
+
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+{
+ struct xe_device *xe = vm->xe;
+
+ if (!xe->info.has_device_atomics_on_smem)
+ return false;
+
+ /*
+ * If an SMEM+LMEM allocation is backed by SMEM, a device
+ * atomic will cause a gpu page fault and the allocation
+ * then gets migrated to LMEM; bind such allocations with
+ * device atomics enabled.
+ *
+ * TODO: Revisit this. Perhaps add something like a
+ * fault_on_atomics_in_system UAPI flag.
+ * Note that this also prohibits GPU atomics in LR mode for
+ * userptr and system memory on DGFX.
+ */
+ return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
+ (bo && xe_bo_has_single_placement(bo))));
+}
+
/**
* xe_pt_stage_bind() - Build a disconnected page-table tree for a given address
* range.
* @tile: The tile we're building for.
* @vma: The vma indicating the address range.
+ * @range: The range indicating the address range.
* @entries: Storage for the update entries used for connecting the tree to
* the main tree at commit time.
* @num_entries: On output contains the number of @entries used.
+ * @clear_pt: Clear the page table entries.
*
* This function builds a disconnected page-table tree for a given address
* range. The tree is connected to the main vm tree for the gpu using
@@ -601,76 +689,72 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
*/
static int
xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_svm_range *range,
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool clear_pt)
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_bo *bo = xe_vma_bo(vma);
- bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
- (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
struct xe_res_cursor curs;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_pt_stage_bind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_bind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
- .vm = xe_vma_vm(vma),
+ .vm = vm,
.tile = tile,
.curs = &curs,
- .va_curs_start = xe_vma_start(vma),
+ .va_curs_start = range ? range->base.itree.start :
+ xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
- .needs_64K = (xe_vma_vm(vma)->flags & XE_VM_FLAG_64K) && is_devmem,
+ .clear_pt = clear_pt,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
int ret;
- /**
- * Default atomic expectations for different allocation scenarios are as follows:
- *
- * 1. Traditional API: When the VM is not in LR mode:
- * - Device atomics are expected to function with all allocations.
- *
- * 2. Compute/SVM API: When the VM is in LR mode:
- * - Device atomics are the default behavior when the bo is placed in a single region.
- * - In all other cases device atomics will be disabled with AE=0 until an application
- * request differently using a ioctl like madvise.
- */
- if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- if (xe_vm_in_lr_mode(xe_vma_vm(vma))) {
- if (bo && xe_bo_has_single_placement(bo))
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
- /**
- * If a SMEM+LMEM allocation is backed by SMEM, a device
- * atomics will cause a gpu page fault and which then
- * gets migrated to LMEM, bind such allocations with
- * device atomics enabled.
- */
- else if (is_devmem && !xe_bo_has_single_placement(bo))
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
+ if (range) {
+ /* Move this entire thing to xe_svm.c? */
+ xe_svm_notifier_lock(vm);
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "BIND PREPARE - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
+ if (xe_svm_range_has_dma_mapping(range)) {
+ xe_res_first_dma(range->base.dma_addr, 0,
+ range->base.itree.last + 1 - range->base.itree.start,
+ &curs);
+ xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
- xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE;
+ xe_assert(xe, false);
}
-
- /**
- * Unset AE if the platform(PVC) doesn't support it on an
- * allocation
+ /*
+ * Note: when unlocking, the resource cursor dma addresses may become
+ * stale, but the bind will be aborted anyway at commit time.
*/
- if (!xe->info.has_device_atomics_on_smem && !is_devmem)
- xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE;
+ xe_svm_notifier_unlock(vm);
}
- if (is_devmem) {
- xe_walk.default_pte |= XE_PPGTT_PTE_DM;
- xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource);
- }
+ xe_walk.needs_64K = (vm->flags & XE_VM_FLAG_64K);
+ if (clear_pt)
+ goto walk_pt;
- if (!xe_vma_has_no_bo(vma) && xe_bo_is_stolen(bo))
- xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo));
+ if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ XE_USM_PPGTT_PTE_AE : 0;
+ }
- xe_bo_assert_held(bo);
+ xe_walk.default_vram_pte |= XE_PPGTT_PTE_DM;
+ xe_walk.dma_offset = bo ? vram_region_gpu_offset(bo->ttm.resource) : 0;
+ if (!range)
+ xe_bo_assert_held(bo);
- if (!xe_vma_is_null(vma)) {
+ if (!xe_vma_is_null(vma) && !range) {
if (xe_vma_is_userptr(vma))
xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
xe_vma_size(vma), &curs);
@@ -680,12 +764,15 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
else
xe_res_first_sg(xe_bo_sg(bo), xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
- } else {
+ } else if (!range) {
curs.size = xe_vma_size(vma);
}
- ret = xe_pt_walk_range(&pt->base, pt->level, xe_vma_start(vma),
- xe_vma_end(vma), &xe_walk.base);
+walk_pt:
+ ret = xe_pt_walk_range(&pt->base, pt->level,
+ range ? range->base.itree.start : xe_vma_start(vma),
+ range ? range->base.itree.last + 1 : xe_vma_end(vma),
+ &xe_walk.base);
*num_entries = xe_walk.wupd.num_used_entries;
return ret;
@@ -829,6 +916,46 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
return xe_walk.needs_invalidate;
}
+/**
+ * xe_pt_zap_ptes_range() - Zap (zero) gpu ptes of a SVM range
+ * @tile: The tile we're zapping for.
+ * @vm: The VM we're zapping for.
+ * @range: The SVM range we're zapping for.
+ *
+ * SVM invalidation needs to be able to zap the gpu ptes of a given address
+ * range. In order to do that, this function needs access to the
+ * shared page-table entries so it can either clear the leaf PTEs or
+ * clear the pointers to lower-level page-tables. The caller is required
+ * to hold the SVM notifier lock.
+ *
+ * Return: Whether ptes were actually updated and a TLB invalidation is
+ * required.
+ */
+bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct xe_pt_zap_ptes_walk xe_walk = {
+ .base = {
+ .ops = &xe_pt_zap_ptes_ops,
+ .shifts = xe_normal_pt_shifts,
+ .max_level = XE_PT_HIGHEST_LEVEL,
+ },
+ .tile = tile,
+ };
+ struct xe_pt *pt = vm->pt_root[tile->id];
+ u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
+
+ xe_svm_assert_in_notifier(vm);
+
+ if (!(pt_mask & BIT(tile->id)))
+ return false;
+
+ (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
+ range->base.itree.last + 1, &xe_walk.base);
+
+ return xe_walk.needs_invalidate;
+}
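+
+/*
+ * A minimal illustrative sketch (assumed usage, the helper name is
+ * hypothetical, not taken from this patch): an SVM invalidation path that
+ * already holds the notifier lock zaps the PTEs for a tile and only needs a
+ * GT TLB invalidation when the walk reports that PTEs were actually updated.
+ * The TLB invalidation call itself is elided.
+ */
+#if 0
+static void example_invalidate_range_on_tile(struct xe_tile *tile,
+					     struct xe_vm *vm,
+					     struct xe_svm_range *range)
+{
+	/* Caller (the SVM notifier) holds the notifier lock. */
+	if (xe_pt_zap_ptes_range(tile, vm, range)) {
+		/* PTEs were zapped: issue a GT TLB invalidation for the range. */
+	}
+}
+#endif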
+
static void
xe_vm_populate_pgtable(struct xe_migrate_pt_update *pt_update, struct xe_tile *tile,
struct iosys_map *map, void *data,
@@ -872,18 +999,38 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
}
}
-static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+#define XE_INVALID_VMA ((struct xe_vma *)(0xdeaddeadull))
+
+static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
{
- struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_vm *vm;
+ if (vma == XE_INVALID_VMA)
+ return;
+
+ vm = xe_vma_vm(vma);
lockdep_assert_held(&vm->lock);
- if (!xe_vma_is_userptr(vma) && !xe_vma_is_null(vma))
+ if (!xe_vma_has_no_bo(vma))
dma_resv_assert_held(xe_vma_bo(vma)->ttm.base.resv);
xe_vm_assert_held(vm);
}
+static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+{
+ struct xe_vm *vm;
+
+ if (vma == XE_INVALID_VMA)
+ return;
+
+ vm = xe_vma_vm(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
+
+ if (xe_vma_is_userptr(vma))
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+}
+
static void xe_pt_commit(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, struct llist_head *deferred)
@@ -894,14 +1041,19 @@ static void xe_pt_commit(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
if (!pt->level)
continue;
+ pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+ int j_ = j + entries[i].ofs;
- xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
+ pt_dir->children[j_] = pt_dir->staging[j_];
+ xe_pt_destroy(oldpte, (vma == XE_INVALID_VMA) ? 0 :
+ xe_vma_vm(vma)->flags, deferred);
}
}
}
@@ -912,7 +1064,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_pt *pt = entries[i].pt;
@@ -927,10 +1079,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
- struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
- pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
}
@@ -942,7 +1094,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
{
u32 i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
@@ -960,10 +1112,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
struct xe_pt *oldpte = NULL;
- if (xe_pt_entry(pt_dir, j_))
- oldpte = xe_pt_entry(pt_dir, j_);
+ if (xe_pt_entry_staging(pt_dir, j_))
+ oldpte = xe_pt_entry_staging(pt_dir, j_);
- pt_dir->children[j_] = &newpte->base;
+ pt_dir->staging[j_] = &newpte->base;
entries[i].pt_entries[j].pt = oldpte;
}
}
@@ -980,12 +1132,15 @@ static void xe_pt_free_bind(struct xe_vm_pgtable_update *entries,
static int
xe_pt_prepare_bind(struct xe_tile *tile, struct xe_vma *vma,
- struct xe_vm_pgtable_update *entries, u32 *num_entries)
+ struct xe_svm_range *range,
+ struct xe_vm_pgtable_update *entries,
+ u32 *num_entries, bool invalidate_on_bind)
{
int err;
*num_entries = 0;
- err = xe_pt_stage_bind(tile, vma, entries, num_entries);
+ err = xe_pt_stage_bind(tile, vma, range, entries, num_entries,
+ invalidate_on_bind);
if (!err)
xe_tile_assert(tile, *num_entries);
@@ -1068,6 +1223,11 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
{
int err = 0;
+ /*
+	 * No need to check for is_cpu_addr_mirror here as vma_add_deps is a
+	 * NOP if the VMA is a CPU address mirror.
+ */
+
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if (!op->map.immediate && xe_vm_in_fault_mode(vm))
@@ -1086,6 +1246,8 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
case DRM_GPUVA_OP_PREFETCH:
err = vma_add_deps(gpuva_to_vma(op->base.prefetch.va), job);
break;
+ case DRM_GPUVA_OP_DRIVER:
+ break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1212,42 +1374,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
uvma = to_userptr_vma(vma);
- notifier_seq = uvma->userptr.notifier_seq;
+ if (xe_pt_userptr_inject_eagain(uvma))
+ xe_vma_userptr_force_invalidate(uvma);
- if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
- return 0;
+ notifier_seq = uvma->userptr.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq) &&
- !xe_pt_userptr_inject_eagain(uvma))
+ notifier_seq))
return 0;
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm))
return -EAGAIN;
- } else {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
-
- if (xe_vm_in_preempt_fence_mode(vm)) {
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
- }
- }
+ /*
+ * Just continue the operation since exec or rebind worker
+ * will take care of rebinding.
+ */
return 0;
}
@@ -1310,6 +1452,42 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
return err;
}
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
+{
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vma_op *op;
+ int err;
+
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
+
+ xe_svm_notifier_lock(vm);
+
+ list_for_each_entry(op, &vops->list, link) {
+ struct xe_svm_range *range = op->map_range.range;
+
+ if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
+ continue;
+
+ xe_svm_range_debug(range, "PRE-COMMIT");
+
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+ xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
+ }
+
+ return 0;
+}
+#endif
+
struct invalidation_fence {
struct xe_gt_tlb_invalidation_fence base;
struct xe_gt *gt;
@@ -1333,8 +1511,7 @@ static void invalidation_fence_cb(struct dma_fence *fence,
queue_work(system_wq, &ifence->work);
} else {
ifence->base.base.error = ifence->fence->error;
- dma_fence_signal(&ifence->base.base);
- dma_fence_put(&ifence->base.base);
+ xe_gt_tlb_invalidation_fence_signal(&ifence->base);
}
dma_fence_put(ifence->fence);
}
@@ -1496,7 +1673,9 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
* xe_pt_stage_unbind() - Build page-table update structures for an unbind
* operation
* @tile: The tile we're unbinding for.
+ * @vm: The vm
* @vma: The vma we're unbinding.
+ * @range: The range we're unbinding.
* @entries: Caller-provided storage for the update structures.
*
* Builds page-table update structures for an unbind operation. The function
@@ -1506,24 +1685,30 @@ static const struct xe_pt_walk_ops xe_pt_stage_unbind_ops = {
*
* Return: The number of entries used.
*/
-static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
+static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
+ struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries)
{
+ u64 start = range ? range->base.itree.start : xe_vma_start(vma);
+ u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
struct xe_pt_stage_unbind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_unbind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.tile = tile,
- .modified_start = xe_vma_start(vma),
- .modified_end = xe_vma_end(vma),
+ .modified_start = start,
+ .modified_end = end,
.wupd.entries = entries,
};
- struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
+ struct xe_pt *pt = vm->pt_root[tile->id];
- (void)xe_pt_walk_shared(&pt->base, pt->level, xe_vma_start(vma),
- xe_vma_end(vma), &xe_walk.base);
+ (void)xe_pt_walk_shared(&pt->base, pt->level, start, end,
+ &xe_walk.base);
return xe_walk.wupd.num_used_entries;
}
@@ -1555,7 +1740,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1568,7 +1753,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
continue;
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
- pt_dir->children[j] =
+ pt_dir->staging[j] =
entries[i].pt_entries[j - entry->ofs].pt ?
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
}
@@ -1581,7 +1766,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; ++i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1595,20 +1780,20 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
entry->pt_entries[j - entry->ofs].pt =
- xe_pt_entry(pt_dir, j);
- pt_dir->children[j] = NULL;
+ xe_pt_entry_staging(pt_dir, j);
+ pt_dir->staging[j] = NULL;
}
}
}
static void
xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ u64 start, u64 end)
{
+ u64 last;
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
int i, level = 0;
- u64 start, last;
for (i = 0; i < pt_op->num_entries; i++) {
const struct xe_vm_pgtable_update *entry = &pt_op->entries[i];
@@ -1618,8 +1803,8 @@ xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
}
/* Greedy (non-optimal) calculation but simple */
- start = ALIGN_DOWN(xe_vma_start(vma), 0x1ull << xe_pt_shift(level));
- last = ALIGN(xe_vma_end(vma), 0x1ull << xe_pt_shift(level)) - 1;
+ start = ALIGN_DOWN(start, 0x1ull << xe_pt_shift(level));
+ last = ALIGN(end, 0x1ull << xe_pt_shift(level)) - 1;
if (start < pt_update_ops->start)
pt_update_ops->start = start;
@@ -1640,12 +1825,13 @@ static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma)
+ struct xe_vma *vma, bool invalidate_on_bind)
{
u32 current_op = pt_update_ops->current_op;
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
int err;
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
xe_bo_assert_held(xe_vma_bo(vma));
vm_dbg(&xe_vma_vm(vma)->xe->drm,
@@ -1660,15 +1846,17 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
if (err)
return err;
- err = xe_pt_prepare_bind(tile, vma, pt_op->entries,
- &pt_op->num_entries);
+ err = xe_pt_prepare_bind(tile, vma, NULL, pt_op->entries,
+ &pt_op->num_entries, invalidate_on_bind);
if (!err) {
xe_tile_assert(tile, pt_op->num_entries <=
ARRAY_SIZE(pt_op->entries));
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, true);
- xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ xe_pt_update_ops_rfence_interval(pt_update_ops,
+ xe_vma_start(vma),
+ xe_vma_end(vma));
++pt_update_ops->current_op;
pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
@@ -1681,11 +1869,11 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
* If !rebind, and scratch enabled VMs, there is a chance the scratch
* PTE is already cached in the TLB so it needs to be invalidated.
* On !LR VMs this is done in the ring ops preceding a batch, but on
- * non-faulting LR, in particular on user-space batch buffer chaining,
- * it needs to be done here.
+ * LR, in particular on user-space batch buffer chaining, it needs to
+ * be done here.
*/
if ((!pt_op->rebind && xe_vm_has_scratch(vm) &&
- xe_vm_in_preempt_fence_mode(vm)))
+ xe_vm_in_lr_mode(vm)))
pt_update_ops->needs_invalidation = true;
else if (pt_op->rebind && !xe_vm_in_lr_mode(vm))
/* We bump also if batch_invalidate_tlb is true */
@@ -1702,6 +1890,48 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
return err;
}
+static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_vma *vma, struct xe_svm_range *range)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+ int err;
+
+ xe_tile_assert(tile, xe_vma_is_cpu_addr_mirror(vma));
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "Preparing bind, with range [%lx...%lx)\n",
+ range->base.itree.start, range->base.itree.last);
+
+ pt_op->vma = NULL;
+ pt_op->bind = true;
+ pt_op->rebind = BIT(tile->id) & range->tile_present;
+
+ err = xe_pt_prepare_bind(tile, vma, range, pt_op->entries,
+ &pt_op->num_entries, false);
+ if (!err) {
+ xe_tile_assert(tile, pt_op->num_entries <=
+ ARRAY_SIZE(pt_op->entries));
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, true);
+
+ xe_pt_update_ops_rfence_interval(pt_update_ops,
+ range->base.itree.start,
+ range->base.itree.last + 1);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_svm_lock = true;
+
+ pt_op->vma = vma;
+ xe_pt_commit_prepare_bind(vma, pt_op->entries,
+ pt_op->num_entries, pt_op->rebind);
+ } else {
+ xe_pt_cancel_bind(vma, pt_op->entries, pt_op->num_entries);
+ }
+
+ return err;
+}
+
static int unbind_op_prepare(struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma)
@@ -1713,19 +1943,13 @@ static int unbind_op_prepare(struct xe_tile *tile,
if (!((vma->tile_present | vma->tile_staged) & BIT(tile->id)))
return 0;
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
xe_bo_assert_held(xe_vma_bo(vma));
vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing unbind, with range [%llx...%llx)\n",
xe_vma_start(vma), xe_vma_end(vma) - 1);
- /*
- * Wait for invalidation to complete. Can corrupt internal page table
- * state if an invalidation is running while preparing an unbind.
- */
- if (xe_vma_is_userptr(vma) && xe_vm_in_fault_mode(xe_vma_vm(vma)))
- mmu_interval_read_begin(&to_userptr_vma(vma)->userptr.notifier);
-
pt_op->vma = vma;
pt_op->bind = false;
pt_op->rebind = false;
@@ -1734,11 +1958,13 @@ static int unbind_op_prepare(struct xe_tile *tile,
if (err)
return err;
- pt_op->num_entries = xe_pt_stage_unbind(tile, vma, pt_op->entries);
+ pt_op->num_entries = xe_pt_stage_unbind(tile, xe_vma_vm(vma),
+ vma, NULL, pt_op->entries);
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, false);
- xe_pt_update_ops_rfence_interval(pt_update_ops, vma);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
+ xe_vma_end(vma));
++pt_update_ops->current_op;
pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
pt_update_ops->needs_invalidation = true;
@@ -1748,6 +1974,42 @@ static int unbind_op_prepare(struct xe_tile *tile,
return 0;
}
+static int unbind_range_prepare(struct xe_vm *vm,
+ struct xe_tile *tile,
+ struct xe_vm_pgtable_update_ops *pt_update_ops,
+ struct xe_svm_range *range)
+{
+ u32 current_op = pt_update_ops->current_op;
+ struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[current_op];
+
+ if (!(range->tile_present & BIT(tile->id)))
+ return 0;
+
+ vm_dbg(&vm->xe->drm,
+ "Preparing unbind, with range [%lx...%lx)\n",
+ range->base.itree.start, range->base.itree.last);
+
+ pt_op->vma = XE_INVALID_VMA;
+ pt_op->bind = false;
+ pt_op->rebind = false;
+
+ pt_op->num_entries = xe_pt_stage_unbind(tile, vm, NULL, range,
+ pt_op->entries);
+
+ xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
+ pt_op->num_entries, false);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
+ range->base.itree.last + 1);
+ ++pt_update_ops->current_op;
+ pt_update_ops->needs_svm_lock = true;
+ pt_update_ops->needs_invalidation = true;
+
+ xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
+ pt_op->num_entries);
+
+ return 0;
+}
+
static int op_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -1759,36 +2021,69 @@ static int op_prepare(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
+ !op->map.invalidate_on_bind) ||
+ op->map.is_cpu_addr_mirror)
break;
- err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma);
+ err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
+ op->map.invalidate_on_bind);
pt_update_ops->wait_vm_kernel = true;
break;
case DRM_GPUVA_OP_REMAP:
- err = unbind_op_prepare(tile, pt_update_ops,
- gpuva_to_vma(op->base.remap.unmap->va));
+ {
+ struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
+
+ if (xe_vma_is_cpu_addr_mirror(old))
+ break;
+
+ err = unbind_op_prepare(tile, pt_update_ops, old);
if (!err && op->remap.prev) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.prev);
+ op->remap.prev, false);
pt_update_ops->wait_vm_bookkeep = true;
}
if (!err && op->remap.next) {
err = bind_op_prepare(vm, tile, pt_update_ops,
- op->remap.next);
+ op->remap.next, false);
pt_update_ops->wait_vm_bookkeep = true;
}
break;
+ }
case DRM_GPUVA_OP_UNMAP:
- err = unbind_op_prepare(tile, pt_update_ops,
- gpuva_to_vma(op->base.unmap.va));
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ break;
+
+ err = unbind_op_prepare(tile, pt_update_ops, vma);
break;
+ }
case DRM_GPUVA_OP_PREFETCH:
- err = bind_op_prepare(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.prefetch.va));
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ break;
+
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
pt_update_ops->wait_vm_kernel = true;
break;
+ }
+ case DRM_GPUVA_OP_DRIVER:
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+
+ err = bind_range_prepare(vm, tile, pt_update_ops,
+ op->map_range.vma,
+ op->map_range.range);
+ } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ err = unbind_range_prepare(vm, tile, pt_update_ops,
+ op->unmap_range.range);
+ }
+ break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1851,12 +2146,15 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_prepare, ERRNO);
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
struct xe_vma *vma, struct dma_fence *fence,
- struct dma_fence *fence2)
+ struct dma_fence *fence2, bool invalidate_on_bind)
{
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
+
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
@@ -1870,6 +2168,8 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
vma->tile_present |= BIT(tile->id);
vma->tile_staged &= ~BIT(tile->id);
+ if (invalidate_on_bind)
+ vma->tile_invalidated |= BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
to_userptr_vma(vma)->userptr.initial_bind = true;
@@ -1890,6 +2190,8 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vma *vma, struct dma_fence *fence,
struct dma_fence *fence2)
{
+ xe_tile_assert(tile, !xe_vma_is_cpu_addr_mirror(vma));
+
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
@@ -1924,32 +2226,66 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- if (!op->map.immediate && xe_vm_in_fault_mode(vm))
+ if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
+ op->map.is_cpu_addr_mirror)
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
- fence2);
+ fence2, op->map.invalidate_on_bind);
break;
case DRM_GPUVA_OP_REMAP:
- unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.remap.unmap->va), fence,
- fence2);
+ {
+ struct xe_vma *old = gpuva_to_vma(op->base.remap.unmap->va);
+
+ if (xe_vma_is_cpu_addr_mirror(old))
+ break;
+
+ unbind_op_commit(vm, tile, pt_update_ops, old, fence, fence2);
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
- fence, fence2);
+ fence, fence2, false);
if (op->remap.next)
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
- fence, fence2);
+ fence, fence2, false);
break;
+ }
case DRM_GPUVA_OP_UNMAP:
- unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.unmap.va), fence, fence2);
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ unbind_op_commit(vm, tile, pt_update_ops, vma, fence,
+ fence2);
break;
+ }
case DRM_GPUVA_OP_PREFETCH:
- bind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.prefetch.va), fence, fence2);
+ {
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ bind_op_commit(vm, tile, pt_update_ops, vma, fence,
+ fence2, false);
+ break;
+ }
+ case DRM_GPUVA_OP_DRIVER:
+ {
+ /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
+
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ WRITE_ONCE(op->map_range.range->tile_present,
+ op->map_range.range->tile_present |
+ BIT(tile->id));
+ WRITE_ONCE(op->map_range.range->tile_invalidated,
+ op->map_range.range->tile_invalidated &
+ ~BIT(tile->id));
+ } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ WRITE_ONCE(op->unmap_range.range->tile_present,
+ op->unmap_range.range->tile_present &
+ ~BIT(tile->id));
+ }
break;
+ }
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
@@ -1967,6 +2303,16 @@ static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
.pre_commit = xe_pt_userptr_pre_commit,
};
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
+ .populate = xe_vm_populate_pgtable,
+ .clear = xe_migrate_clear_pgtable_callback,
+ .pre_commit = xe_pt_svm_pre_commit,
+};
+#else
+static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+#endif
+
/**
* xe_pt_update_ops_run() - Run PT update operations
* @tile: Tile of PT update operations
@@ -1992,7 +2338,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vma_op *op;
int err = 0, i;
struct xe_migrate_pt_update update = {
- .ops = pt_update_ops->needs_userptr_lock ?
+ .ops = pt_update_ops->needs_svm_lock ?
+ &svm_migrate_ops :
+ pt_update_ops->needs_userptr_lock ?
&userptr_migrate_ops :
&migrate_ops,
.vops = vops,
@@ -2113,6 +2461,8 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
&ifence->base.base, &mfence->base.base);
}
+ if (pt_update_ops->needs_svm_lock)
+ xe_svm_notifier_unlock(vm);
if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
@@ -2131,6 +2481,7 @@ kill_vm_tile1:
return ERR_PTR(err);
}
+ALLOW_ERROR_INJECTION(xe_pt_update_ops_run, ERRNO);
/**
* xe_pt_update_ops_fini() - Finish PT update operations
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 9ab386431cad..5ecf003d513c 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -13,6 +13,7 @@ struct dma_fence;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
@@ -35,6 +36,8 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);
+void xe_pt_clear(struct xe_device *xe, struct xe_pt *pt);
+
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
struct xe_vma_ops *vops);
@@ -42,5 +45,7 @@ void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);
bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
+bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
+ struct xe_svm_range *range);
#endif
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 384cc04de719..69eab6f37cfe 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -104,6 +104,8 @@ struct xe_vm_pgtable_update_ops {
u32 num_ops;
/** @current_op: current operations */
u32 current_op;
+ /** @needs_svm_lock: Needs SVM lock */
+ bool needs_svm_lock;
/** @needs_userptr_lock: Needs userptr lock */
bool needs_userptr_lock;
/** @needs_invalidation: Needs invalidation */
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index b8b3d2aea492..be602a763ff3 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->children ? parent->children : NULL;
+ struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
+ (parent->children ?: NULL);
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index 5ecc4d2f0f65..5c02c244f7de 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -11,12 +11,14 @@
/**
* struct xe_ptw - base class for driver pagetable subclassing.
* @children: Pointer to an array of children if any.
+ * @staging: Pointer to an array of staging children, if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
* embed an array of xe_ptw pointers.
*/
struct xe_ptw {
struct xe_ptw **children;
+ struct xe_ptw **staging;
};
/**
@@ -41,6 +43,8 @@ struct xe_pt_walk {
* as shared pagetables.
*/
bool shared_pt_mode;
+ /** @staging: Walk staging PT structure */
+ bool staging;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
new file mode 100644
index 000000000000..b5bc15f436fa
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -0,0 +1,923 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2024 Intel Corporation.
+ */
+
+#include "xe_pxp.h"
+
+#include <drm/drm_managed.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_bo_types.h"
+#include "xe_device_types.h"
+#include "xe_exec_queue.h"
+#include "xe_force_wake.h"
+#include "xe_guc_submit.h"
+#include "xe_gsc_proxy.h"
+#include "xe_gt.h"
+#include "xe_gt_types.h"
+#include "xe_huc.h"
+#include "xe_mmio.h"
+#include "xe_pm.h"
+#include "xe_pxp_submit.h"
+#include "xe_pxp_types.h"
+#include "xe_uc_fw.h"
+#include "regs/xe_irq_regs.h"
+#include "regs/xe_pxp_regs.h"
+
+/**
+ * DOC: PXP
+ *
+ * PXP (Protected Xe Path) allows execution and flip to display of protected
+ * (i.e. encrypted) objects. This feature is currently only supported in
+ * integrated parts.
+ */
+
+#define ARB_SESSION DRM_XE_PXP_HWDRM_DEFAULT_SESSION /* shorter define */
+
+/*
+ * A submission to GSC can take up to 250ms to complete, so use a 300ms
+ * timeout for activation where only one of those is involved. Termination
+ * additionally requires a submission to VCS and an interaction with KCR, so
+ * bump the timeout to 500ms for that.
+ */
+#define PXP_ACTIVATION_TIMEOUT_MS 300
+#define PXP_TERMINATION_TIMEOUT_MS 500
+
+bool xe_pxp_is_supported(const struct xe_device *xe)
+{
+ return xe->info.has_pxp && IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY);
+}
+
+bool xe_pxp_is_enabled(const struct xe_pxp *pxp)
+{
+ return pxp;
+}
+
+static bool pxp_prerequisites_done(const struct xe_pxp *pxp)
+{
+ struct xe_gt *gt = pxp->gt;
+ unsigned int fw_ref;
+ bool ready;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+
+ /*
+ * If force_wake fails we could falsely report the prerequisites as not
+ * done even if they are; the consequence of this would be that the
+ * callers won't go ahead with using PXP, but if force_wake doesn't work
+ * the GT is very likely in a bad state so not really a problem to abort
+ * PXP. Therefore, we can just log the force_wake error and not escalate
+ * it.
+ */
+ XE_WARN_ON(!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL));
+
+ /* PXP requires both HuC authentication via GSC and GSC proxy initialized */
+ ready = xe_huc_is_authenticated(&gt->uc.huc, XE_HUC_AUTH_VIA_GSC) &&
+ xe_gsc_proxy_init_done(&gt->uc.gsc);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return ready;
+}
+
+/**
+ * xe_pxp_get_readiness_status - check whether PXP is ready for userspace use
+ * @pxp: the xe_pxp pointer (can be NULL if PXP is disabled)
+ *
+ * Returns: 0 if PXP is not ready yet, 1 if it is ready, a negative errno value
+ * if PXP is not supported/enabled or if something went wrong in the
+ * initialization of the prerequisites. Note that the return values of this
+ * function follow the uapi (see drm_xe_query_pxp_status), so they can be used
+ * directly in the query ioctl.
+ */
+int xe_pxp_get_readiness_status(struct xe_pxp *pxp)
+{
+ int ret = 0;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /* if the GSC or HuC FW are in an error state, PXP will never work */
+ if (xe_uc_fw_status_to_error(pxp->gt->uc.huc.fw.status) ||
+ xe_uc_fw_status_to_error(pxp->gt->uc.gsc.fw.status))
+ return -EIO;
+
+ xe_pm_runtime_get(pxp->xe);
+
+ /* PXP requires both HuC loaded and GSC proxy initialized */
+ if (pxp_prerequisites_done(pxp))
+ ret = 1;
+
+ xe_pm_runtime_put(pxp->xe);
+ return ret;
+}
+
+static bool pxp_session_is_in_play(struct xe_pxp *pxp, u32 id)
+{
+ struct xe_gt *gt = pxp->gt;
+
+ return xe_mmio_read32(&gt->mmio, KCR_SIP) & BIT(id);
+}
+
+static int pxp_wait_for_session_state(struct xe_pxp *pxp, u32 id, bool in_play)
+{
+ struct xe_gt *gt = pxp->gt;
+ u32 mask = BIT(id);
+
+ return xe_mmio_wait32(&gt->mmio, KCR_SIP, mask, in_play ? mask : 0,
+ 250, NULL, false);
+}
+
+static void pxp_invalidate_queues(struct xe_pxp *pxp);
+
+static int pxp_terminate_hw(struct xe_pxp *pxp)
+{
+ struct xe_gt *gt = pxp->gt;
+ unsigned int fw_ref;
+ int ret = 0;
+
+ drm_dbg(&pxp->xe->drm, "Terminating PXP\n");
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /* terminate the hw session */
+ ret = xe_pxp_submit_session_termination(pxp, ARB_SESSION);
+ if (ret)
+ goto out;
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
+ if (ret)
+ goto out;
+
+ /* Trigger full HW cleanup */
+ xe_mmio_write32(&gt->mmio, KCR_GLOBAL_TERMINATE, 1);
+
+ /* now we can tell the GSC to clean up its own state */
+ ret = xe_pxp_submit_session_invalidation(&pxp->gsc_res, ARB_SESSION);
+
+out:
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return ret;
+}
+
+static void mark_termination_in_progress(struct xe_pxp *pxp)
+{
+ lockdep_assert_held(&pxp->mutex);
+
+ reinit_completion(&pxp->termination);
+ pxp->status = XE_PXP_TERMINATION_IN_PROGRESS;
+}
+
+static void pxp_terminate(struct xe_pxp *pxp)
+{
+ int ret = 0;
+ struct xe_device *xe = pxp->xe;
+
+ if (!wait_for_completion_timeout(&pxp->activation,
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ drm_err(&xe->drm, "failed to wait for PXP start before termination\n");
+
+ mutex_lock(&pxp->mutex);
+
+ if (pxp->status == XE_PXP_ACTIVE)
+ pxp->key_instance++;
+
+ /*
+ * we'll mark the status as needing termination on resume, so no need to
+ * emit a termination now.
+ */
+ if (pxp->status == XE_PXP_SUSPENDED) {
+ mutex_unlock(&pxp->mutex);
+ return;
+ }
+
+ /*
+ * If we have a termination already in progress, we need to wait for
+ * it to complete before queueing another one. Once the first
+ * termination is completed we'll set the state back to
+ * NEEDS_TERMINATION and leave it to the pxp start code to issue it.
+ */
+ if (pxp->status == XE_PXP_TERMINATION_IN_PROGRESS) {
+ pxp->status = XE_PXP_NEEDS_ADDITIONAL_TERMINATION;
+ mutex_unlock(&pxp->mutex);
+ return;
+ }
+
+ mark_termination_in_progress(pxp);
+
+ mutex_unlock(&pxp->mutex);
+
+ pxp_invalidate_queues(pxp);
+
+ ret = pxp_terminate_hw(pxp);
+ if (ret) {
+ drm_err(&xe->drm, "PXP termination failed: %pe\n", ERR_PTR(ret));
+ mutex_lock(&pxp->mutex);
+ pxp->status = XE_PXP_ERROR;
+ complete_all(&pxp->termination);
+ mutex_unlock(&pxp->mutex);
+ }
+}
+
+static void pxp_terminate_complete(struct xe_pxp *pxp)
+{
+ /*
+ * We expect PXP to be in one of 3 states when we get here:
+ * - XE_PXP_TERMINATION_IN_PROGRESS: a single termination event was
+ * requested and it is now completing, so we're ready to start.
+ * - XE_PXP_NEEDS_ADDITIONAL_TERMINATION: a second termination was
+ * requested while the first one was still being processed.
+ * - XE_PXP_SUSPENDED: PXP is now suspended, so we defer everything to
+ * when we come back on resume.
+ */
+ mutex_lock(&pxp->mutex);
+
+ switch (pxp->status) {
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ pxp->status = XE_PXP_READY_TO_START;
+ break;
+ case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
+ pxp->status = XE_PXP_NEEDS_TERMINATION;
+ break;
+ case XE_PXP_SUSPENDED:
+ /* Nothing to do */
+ break;
+ default:
+ drm_err(&pxp->xe->drm,
+ "PXP termination complete while status was %u\n",
+ pxp->status);
+ }
+
+ complete_all(&pxp->termination);
+
+ mutex_unlock(&pxp->mutex);
+}
+
+static void pxp_irq_work(struct work_struct *work)
+{
+ struct xe_pxp *pxp = container_of(work, typeof(*pxp), irq.work);
+ struct xe_device *xe = pxp->xe;
+ u32 events = 0;
+
+ spin_lock_irq(&xe->irq.lock);
+ events = pxp->irq.events;
+ pxp->irq.events = 0;
+ spin_unlock_irq(&xe->irq.lock);
+
+ if (!events)
+ return;
+
+ /*
+ * If we're processing a termination irq while suspending then don't
+ * bother, we're going to re-init everything on resume anyway.
+ */
+ if ((events & PXP_TERMINATION_REQUEST) && !xe_pm_runtime_get_if_active(xe))
+ return;
+
+ if (events & PXP_TERMINATION_REQUEST) {
+ events &= ~PXP_TERMINATION_COMPLETE;
+ pxp_terminate(pxp);
+ }
+
+ if (events & PXP_TERMINATION_COMPLETE)
+ pxp_terminate_complete(pxp);
+
+ if (events & PXP_TERMINATION_REQUEST)
+ xe_pm_runtime_put(xe);
+}
+
+/**
+ * xe_pxp_irq_handler - Handles PXP interrupts.
+ * @xe: the xe_device structure
+ * @iir: interrupt vector
+ */
+void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
+{
+ struct xe_pxp *pxp = xe->pxp;
+
+ if (!xe_pxp_is_enabled(pxp)) {
+ drm_err(&xe->drm, "PXP irq 0x%x received with PXP disabled!\n", iir);
+ return;
+ }
+
+ lockdep_assert_held(&xe->irq.lock);
+
+ if (unlikely(!iir))
+ return;
+
+ if (iir & (KCR_PXP_STATE_TERMINATED_INTERRUPT |
+ KCR_APP_TERMINATED_PER_FW_REQ_INTERRUPT))
+ pxp->irq.events |= PXP_TERMINATION_REQUEST;
+
+ if (iir & KCR_PXP_STATE_RESET_COMPLETE_INTERRUPT)
+ pxp->irq.events |= PXP_TERMINATION_COMPLETE;
+
+ if (pxp->irq.events)
+ queue_work(pxp->irq.wq, &pxp->irq.work);
+}
+
+static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
+{
+ u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ unsigned int fw_ref;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
+ return -EIO;
+
+ xe_mmio_write32(&pxp->gt->mmio, KCR_INIT, val);
+ xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);
+
+ return 0;
+}
+
+static int kcr_pxp_enable(const struct xe_pxp *pxp)
+{
+ return kcr_pxp_set_status(pxp, true);
+}
+
+static int kcr_pxp_disable(const struct xe_pxp *pxp)
+{
+ return kcr_pxp_set_status(pxp, false);
+}
+
+static void pxp_fini(void *arg)
+{
+ struct xe_pxp *pxp = arg;
+
+ destroy_workqueue(pxp->irq.wq);
+ xe_pxp_destroy_execution_resources(pxp);
+
+ /* no need to explicitly disable KCR since we're going to do an FLR */
+}
+
+/**
+ * xe_pxp_init - initialize PXP support
+ * @xe: the xe_device structure
+ *
+ * Initialize the HW state and allocate the objects required for PXP support.
+ * Note that some of the requirement for PXP support (GSC proxy init, HuC auth)
+ * are performed asynchronously as part of the GSC init. PXP can only be used
+ * after both this function and the async worker have completed.
+ *
+ * Returns 0 if PXP is not supported or if PXP initialization is successful,
+ * or a negative errno value if there is an error during the init.
+ */
+int xe_pxp_init(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe->tiles[0].media_gt;
+ struct xe_pxp *pxp;
+ int err;
+
+ if (!xe_pxp_is_supported(xe))
+ return 0;
+
+ /* we only support PXP on single tile devices with a media GT */
+ if (xe->info.tile_count > 1 || !gt)
+ return 0;
+
+ /* The GSCCS is required for submissions to the GSC FW */
+ if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
+ return 0;
+
+ /* PXP requires both GSC and HuC firmwares to be available */
+ if (!xe_uc_fw_is_loadable(&gt->uc.gsc.fw) ||
+ !xe_uc_fw_is_loadable(&gt->uc.huc.fw)) {
+ drm_info(&xe->drm, "skipping PXP init due to missing FW dependencies");
+ return 0;
+ }
+
+ pxp = drmm_kzalloc(&xe->drm, sizeof(struct xe_pxp), GFP_KERNEL);
+ if (!pxp) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&pxp->queues.list);
+ spin_lock_init(&pxp->queues.lock);
+ INIT_WORK(&pxp->irq.work, pxp_irq_work);
+ pxp->xe = xe;
+ pxp->gt = gt;
+
+ pxp->key_instance = 1;
+ pxp->last_suspend_key_instance = 1;
+
+ /*
+ * we'll use the completions to check if there is an action pending,
+	 * so we start them as completed and reinit them when an action is
+	 * triggered.
+ */
+ init_completion(&pxp->activation);
+ init_completion(&pxp->termination);
+ complete_all(&pxp->termination);
+ complete_all(&pxp->activation);
+
+ mutex_init(&pxp->mutex);
+
+ pxp->irq.wq = alloc_ordered_workqueue("pxp-wq", 0);
+ if (!pxp->irq.wq) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = kcr_pxp_enable(pxp);
+ if (err)
+ goto out_wq;
+
+ err = xe_pxp_allocate_execution_resources(pxp);
+ if (err)
+ goto out_kcr_disable;
+
+ xe->pxp = pxp;
+
+ return devm_add_action_or_reset(xe->drm.dev, pxp_fini, pxp);
+
+out_kcr_disable:
+ kcr_pxp_disable(pxp);
+out_wq:
+ destroy_workqueue(pxp->irq.wq);
+out_free:
+ drmm_kfree(&xe->drm, pxp);
+out:
+ drm_err(&xe->drm, "PXP initialization failed: %pe\n", ERR_PTR(err));
+ return err;
+}
+
+static int __pxp_start_arb_session(struct xe_pxp *pxp)
+{
+ int ret;
+ unsigned int fw_ref;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(pxp->gt), XE_FW_GT);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_GT))
+ return -EIO;
+
+ if (pxp_session_is_in_play(pxp, ARB_SESSION)) {
+ ret = -EEXIST;
+ goto out_force_wake;
+ }
+
+ ret = xe_pxp_submit_session_init(&pxp->gsc_res, ARB_SESSION);
+ if (ret) {
+ drm_err(&pxp->xe->drm, "Failed to init PXP arb session: %pe\n", ERR_PTR(ret));
+ goto out_force_wake;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
+ if (ret) {
+		drm_err(&pxp->xe->drm, "PXP ARB session failed to go in play: %pe\n", ERR_PTR(ret));
+ goto out_force_wake;
+ }
+
+ drm_dbg(&pxp->xe->drm, "PXP ARB session is active\n");
+
+out_force_wake:
+ xe_force_wake_put(gt_to_fw(pxp->gt), fw_ref);
+ return ret;
+}
+
+/**
+ * xe_pxp_exec_queue_set_type - Mark a queue as using PXP
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to mark as using PXP
+ * @type: the type of PXP session this queue will use
+ *
+ * Returns 0 if the selected PXP type is supported, -ENODEV otherwise.
+ */
+int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type)
+{
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /* we only support HWDRM sessions right now */
+ xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
+
+ q->pxp.type = type;
+
+ return 0;
+}
+
+static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ spin_lock_irq(&pxp->queues.lock);
+ list_add_tail(&q->pxp.link, &pxp->queues.list);
+ spin_unlock_irq(&pxp->queues.lock);
+}
+
+/**
+ * xe_pxp_exec_queue_add - add a queue to the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to add to the list
+ *
+ * If PXP is enabled and the prerequisites are done, start the PXP ARB
+ * session (if not already running) and add the queue to the PXP list. Note
+ * that the queue must have previously been marked as using PXP with
+ * xe_pxp_exec_queue_set_type.
+ *
+ * Returns 0 if the PXP ARB session is running and the queue is in the list,
+ * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
+ * other errno value if something goes wrong during the session start.
+ */
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ int ret = 0;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /* we only support HWDRM sessions right now */
+ xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
+
+ /*
+ * Runtime suspend kills PXP, so we take a reference to prevent it from
+ * happening while we have active queues that use PXP
+ */
+ xe_pm_runtime_get(pxp->xe);
+
+ /* get_readiness_status() returns 0 for in-progress and 1 for done */
+ ret = xe_pxp_get_readiness_status(pxp);
+ if (ret <= 0) {
+ if (!ret)
+ ret = -EBUSY;
+ goto out;
+ }
+ ret = 0;
+
+wait_for_idle:
+ /*
+ * if there is an action in progress, wait for it. We need to wait
+ * outside the lock because the completion is done from within the lock.
+	 * Note that the two actions should never be pending at the same time.
+ */
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&pxp->activation,
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ mutex_lock(&pxp->mutex);
+
+ /* If PXP is not already active, turn it on */
+ switch (pxp->status) {
+ case XE_PXP_ERROR:
+ ret = -EIO;
+ break;
+ case XE_PXP_ACTIVE:
+ __exec_queue_add(pxp, q);
+ mutex_unlock(&pxp->mutex);
+ goto out;
+ case XE_PXP_READY_TO_START:
+ pxp->status = XE_PXP_START_IN_PROGRESS;
+ reinit_completion(&pxp->activation);
+ break;
+ case XE_PXP_START_IN_PROGRESS:
+ /* If a start is in progress then the completion must not be done */
+ XE_WARN_ON(completion_done(&pxp->activation));
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_idle;
+ case XE_PXP_NEEDS_TERMINATION:
+ mark_termination_in_progress(pxp);
+ break;
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
+ /* If a termination is in progress then the completion must not be done */
+ XE_WARN_ON(completion_done(&pxp->termination));
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_idle;
+ case XE_PXP_SUSPENDED:
+ default:
+ drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
+ ret = -EIO;
+ break;
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+ if (ret)
+ goto out;
+
+ if (!completion_done(&pxp->termination)) {
+ ret = pxp_terminate_hw(pxp);
+ if (ret) {
+ drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
+ mutex_lock(&pxp->mutex);
+ pxp->status = XE_PXP_ERROR;
+ mutex_unlock(&pxp->mutex);
+
+ goto out;
+ }
+
+ goto wait_for_idle;
+ }
+
+ /* All the cases except for start should have exited earlier */
+ XE_WARN_ON(completion_done(&pxp->activation));
+ ret = __pxp_start_arb_session(pxp);
+
+ mutex_lock(&pxp->mutex);
+
+ complete_all(&pxp->activation);
+
+ /*
+ * Any other process should wait until the state goes away from
+	 * XE_PXP_START_IN_PROGRESS, so if the state is anything else, something went
+ * wrong. Mark the status as needing termination and try again.
+ */
+ if (pxp->status != XE_PXP_START_IN_PROGRESS) {
+ drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
+ pxp->status = XE_PXP_NEEDS_TERMINATION;
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_idle;
+ }
+
+ /* If everything went ok, update the status and add the queue to the list */
+ if (!ret) {
+ pxp->status = XE_PXP_ACTIVE;
+ __exec_queue_add(pxp, q);
+ } else {
+ pxp->status = XE_PXP_ERROR;
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+out:
+ /*
+ * in the successful case the PM ref is released from
+ * xe_pxp_exec_queue_remove
+ */
+ if (ret)
+ xe_pm_runtime_put(pxp->xe);
+
+ return ret;
+}
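+
+/*
+ * A minimal illustrative sketch (assumed usage, the helper name is
+ * hypothetical, not taken from this patch): the expected calling sequence for
+ * a queue that runs protected workloads. Teardown is
+ * xe_pxp_exec_queue_remove(), which drops the PM reference and is safe to
+ * call more than once.
+ */
+#if 0
+static int example_setup_pxp_queue(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+	int err;
+
+	/* Mark the queue as a PXP user; only HWDRM sessions are supported. */
+	err = xe_pxp_exec_queue_set_type(pxp, q, DRM_XE_PXP_TYPE_HWDRM);
+	if (err)
+		return err;
+
+	/* Starts the ARB session if needed and adds the queue to the PXP list. */
+	return xe_pxp_exec_queue_add(pxp, q);
+}
+#endif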
+
+static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
+{
+ bool need_pm_put = false;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return;
+
+ if (lock)
+ spin_lock_irq(&pxp->queues.lock);
+
+ if (!list_empty(&q->pxp.link)) {
+ list_del_init(&q->pxp.link);
+ need_pm_put = true;
+ }
+
+ q->pxp.type = DRM_XE_PXP_TYPE_NONE;
+
+ if (lock)
+ spin_unlock_irq(&pxp->queues.lock);
+
+ if (need_pm_put)
+ xe_pm_runtime_put(pxp->xe);
+}
+
+/**
+ * xe_pxp_exec_queue_remove - remove a queue from the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to remove from the list
+ *
+ * If PXP is enabled and the exec_queue is in the list, the queue will be
+ * removed from the list and its PM reference will be released. It is safe to
+ * call this function multiple times for the same queue.
+ */
+void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ __pxp_exec_queue_remove(pxp, q, true);
+}
+
+static void pxp_invalidate_queues(struct xe_pxp *pxp)
+{
+ struct xe_exec_queue *tmp, *q;
+ LIST_HEAD(to_clean);
+
+ spin_lock_irq(&pxp->queues.lock);
+
+ list_for_each_entry_safe(q, tmp, &pxp->queues.list, pxp.link) {
+ q = xe_exec_queue_get_unless_zero(q);
+ if (!q)
+ continue;
+
+ list_move_tail(&q->pxp.link, &to_clean);
+ }
+ spin_unlock_irq(&pxp->queues.lock);
+
+ list_for_each_entry_safe(q, tmp, &to_clean, pxp.link) {
+ xe_exec_queue_kill(q);
+
+ /*
+ * We hold a ref to the queue so there is no risk of racing with
+ * the calls to exec_queue_remove coming from exec_queue_destroy.
+ */
+ __pxp_exec_queue_remove(pxp, q, false);
+
+ xe_exec_queue_put(q);
+ }
+}
+
+/**
+ * xe_pxp_key_assign - mark a BO as using the current PXP key iteration
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @bo: the BO to mark
+ *
+ * Returns: -ENODEV if PXP is disabled, 0 otherwise.
+ */
+int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo)
+{
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ xe_assert(pxp->xe, !bo->pxp_key_instance);
+
+ /*
+	 * Note that the PXP key handling is inherently racy, because the key
+ * can theoretically change at any time (although it's unlikely to do
+ * so without triggers), even right after we copy it. Taking a lock
+ * wouldn't help because the value might still change as soon as we
+ * release the lock.
+ * Userspace needs to handle the fact that their BOs can go invalid at
+ * any point.
+ */
+ bo->pxp_key_instance = pxp->key_instance;
+
+ return 0;
+}
+
+/**
+ * xe_pxp_bo_key_check - check if the key used by a xe_bo is valid
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @bo: the BO we want to check
+ *
+ * Checks whether a BO was encrypted with the current key or an obsolete one.
+ *
+ * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
+ * BO is not using PXP, -ENOEXEC if the key is not valid.
+ */
+int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo)
+{
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ if (!xe_bo_is_protected(bo))
+ return -EINVAL;
+
+ xe_assert(pxp->xe, bo->pxp_key_instance);
+
+ /*
+	 * Note that the PXP key handling is inherently racy, because the key
+ * can theoretically change at any time (although it's unlikely to do
+ * so without triggers), even right after we check it. Taking a lock
+ * wouldn't help because the value might still change as soon as we
+ * release the lock.
+ * We mitigate the risk by checking the key at multiple points (on each
+ * submission involving the BO and right before flipping it on the
+ * display), but there is still a very small chance that we could
+ * operate on an invalid BO for a single submission or a single frame
+ * flip. This is a compromise made to protect the encrypted data (which
+ * is what the key termination is for).
+ */
+ if (bo->pxp_key_instance != pxp->key_instance)
+ return -ENOEXEC;
+
+ return 0;
+}
+
+/**
+ * xe_pxp_obj_key_check - check if the key used by a drm_gem_obj is valid
+ * @obj: the drm_gem_obj we want to check
+ *
+ * Checks whether a drm_gem_obj was encrypted with the current key or an
+ * obsolete one.
+ *
+ * Returns: 0 if the key is valid, -ENODEV if PXP is disabled, -EINVAL if the
+ * obj is not using PXP, -ENOEXEC if the key is not valid.
+ */
+int xe_pxp_obj_key_check(struct drm_gem_object *obj)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct xe_device *xe = xe_bo_device(bo);
+ struct xe_pxp *pxp = xe->pxp;
+
+ return xe_pxp_bo_key_check(pxp, bo);
+}
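+
+/*
+ * A minimal illustrative sketch (assumed usage, the helper name is
+ * hypothetical, not taken from this patch): the key lifecycle as seen from a
+ * BO. The key instance is assigned once at creation time and re-checked
+ * before each use; -ENOEXEC means the key has since been terminated and the
+ * BO must be recreated.
+ */
+#if 0
+static int example_use_protected_bo(struct xe_pxp *pxp, struct xe_bo *bo)
+{
+	int err;
+
+	/* At BO creation: snapshot the current key instance into the BO. */
+	err = xe_pxp_key_assign(pxp, bo);
+	if (err)
+		return err;
+
+	/*
+	 * Before each submission or display flip: -ENOEXEC here means the key
+	 * was terminated after assignment and userspace must recreate the BO.
+	 */
+	return xe_pxp_bo_key_check(pxp, bo);
+}
+#endif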
+
+/**
+ * xe_pxp_pm_suspend - prepare PXP for HW suspend
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ *
+ * Makes sure all PXP actions have completed and invalidates all PXP queues
+ * and objects before we go into a suspend state.
+ *
+ * Returns: 0 if successful, a negative errno value otherwise.
+ */
+int xe_pxp_pm_suspend(struct xe_pxp *pxp)
+{
+ bool needs_queue_inval = false;
+ int ret = 0;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return 0;
+
+wait_for_activation:
+ if (!wait_for_completion_timeout(&pxp->activation,
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
+
+ mutex_lock(&pxp->mutex);
+
+ switch (pxp->status) {
+ case XE_PXP_ERROR:
+ case XE_PXP_READY_TO_START:
+ case XE_PXP_SUSPENDED:
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
+ /*
+		 * If PXP is not running there is nothing to clean up. If there
+ * is a termination pending then no need to issue another one.
+ */
+ break;
+ case XE_PXP_START_IN_PROGRESS:
+ mutex_unlock(&pxp->mutex);
+ goto wait_for_activation;
+ case XE_PXP_NEEDS_TERMINATION:
+ /* If PXP was never used we can skip the cleanup */
+ if (pxp->key_instance == pxp->last_suspend_key_instance)
+ break;
+ fallthrough;
+ case XE_PXP_ACTIVE:
+ pxp->key_instance++;
+ needs_queue_inval = true;
+ break;
+ default:
+		drm_err(&pxp->xe->drm, "unexpected state during PXP suspend: %u\n",
+ pxp->status);
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * We set this even if we were in error state, hoping the suspend clears
+	 * the error. Worst case we fail again and go into the error state again.
+ */
+ pxp->status = XE_PXP_SUSPENDED;
+
+ mutex_unlock(&pxp->mutex);
+
+ if (needs_queue_inval)
+ pxp_invalidate_queues(pxp);
+
+ /*
+ * if there is a termination in progress, wait for it.
+ * We need to wait outside the lock because the completion is done from
+ * within the lock
+ */
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
+ ret = -ETIMEDOUT;
+
+ pxp->last_suspend_key_instance = pxp->key_instance;
+
+out:
+ return ret;
+}
+
+/**
+ * xe_pxp_pm_resume - re-init PXP after HW suspend
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ */
+void xe_pxp_pm_resume(struct xe_pxp *pxp)
+{
+ int err;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return;
+
+ err = kcr_pxp_enable(pxp);
+
+ mutex_lock(&pxp->mutex);
+
+ xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);
+
+ if (err)
+ pxp->status = XE_PXP_ERROR;
+ else
+ pxp->status = XE_PXP_NEEDS_TERMINATION;
+
+ mutex_unlock(&pxp->mutex);
+}
diff --git a/drivers/gpu/drm/xe/xe_pxp.h b/drivers/gpu/drm/xe/xe_pxp.h
new file mode 100644
index 000000000000..71a23280b900
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_H__
+#define __XE_PXP_H__
+
+#include <linux/types.h>
+
+struct drm_gem_object;
+struct xe_bo;
+struct xe_device;
+struct xe_exec_queue;
+struct xe_pxp;
+
+bool xe_pxp_is_supported(const struct xe_device *xe);
+bool xe_pxp_is_enabled(const struct xe_pxp *pxp);
+int xe_pxp_get_readiness_status(struct xe_pxp *pxp);
+
+int xe_pxp_init(struct xe_device *xe);
+void xe_pxp_irq_handler(struct xe_device *xe, u16 iir);
+
+int xe_pxp_pm_suspend(struct xe_pxp *pxp);
+void xe_pxp_pm_resume(struct xe_pxp *pxp);
+
+int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 type);
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q);
+void xe_pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q);
+
+int xe_pxp_key_assign(struct xe_pxp *pxp, struct xe_bo *bo);
+int xe_pxp_bo_key_check(struct xe_pxp *pxp, struct xe_bo *bo);
+int xe_pxp_obj_key_check(struct drm_gem_object *obj);
+
+#endif /* __XE_PXP_H__ */
diff --git a/drivers/gpu/drm/xe/xe_pxp_debugfs.c b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
new file mode 100644
index 000000000000..525a2f6bb076
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_debugfs.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_pxp_debugfs.h"
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "xe_device.h"
+#include "xe_pxp.h"
+#include "xe_pxp_types.h"
+#include "regs/xe_irq_regs.h"
+
+static struct xe_pxp *node_to_pxp(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static const char *pxp_status_to_str(struct xe_pxp *pxp)
+{
+ lockdep_assert_held(&pxp->mutex);
+
+ switch (pxp->status) {
+ case XE_PXP_ERROR:
+ return "error";
+ case XE_PXP_NEEDS_TERMINATION:
+ return "needs termination";
+ case XE_PXP_TERMINATION_IN_PROGRESS:
+ return "termination in progress";
+ case XE_PXP_READY_TO_START:
+ return "ready to start";
+ case XE_PXP_ACTIVE:
+ return "active";
+ case XE_PXP_SUSPENDED:
+ return "suspended";
+ default:
+ return "unknown";
+ }
+}
+
+static int pxp_info(struct seq_file *m, void *data)
+{
+ struct xe_pxp *pxp = node_to_pxp(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+ const char *status;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ mutex_lock(&pxp->mutex);
+ status = pxp_status_to_str(pxp);
+
+ drm_printf(&p, "status: %s\n", status);
+ drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
+ mutex_unlock(&pxp->mutex);
+
+ return 0;
+}
+
+static int pxp_terminate(struct seq_file *m, void *data)
+{
+ struct xe_pxp *pxp = node_to_pxp(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+ int ready = xe_pxp_get_readiness_status(pxp);
+
+ if (ready < 0)
+ return ready; /* disabled or error occurred */
+ else if (!ready)
+ return -EBUSY; /* init still in progress */
+
+ /* no need for a termination if PXP is not active */
+ if (pxp->status != XE_PXP_ACTIVE) {
+ drm_printf(&p, "PXP not active\n");
+ return 0;
+ }
+
+ /* simulate a termination interrupt */
+ spin_lock_irq(&pxp->xe->irq.lock);
+ xe_pxp_irq_handler(pxp->xe, KCR_PXP_STATE_TERMINATED_INTERRUPT);
+ spin_unlock_irq(&pxp->xe->irq.lock);
+
+ drm_printf(&p, "PXP termination queued\n");
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"info", pxp_info, 0},
+ {"terminate", pxp_terminate, 0},
+};
+
+void xe_pxp_debugfs_register(struct xe_pxp *pxp)
+{
+ struct drm_minor *minor;
+ struct drm_info_list *local;
+ struct dentry *root;
+ int i;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return;
+
+ minor = pxp->xe->drm.primary;
+ if (!minor->debugfs_root)
+ return;
+
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&pxp->xe->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = pxp;
+
+ root = debugfs_create_dir("pxp", minor->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ root, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_pxp_debugfs.h b/drivers/gpu/drm/xe/xe_pxp_debugfs.h
new file mode 100644
index 000000000000..988466aad50b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __XE_PXP_DEBUGFS_H__
+#define __XE_PXP_DEBUGFS_H__
+
+struct xe_pxp;
+
+void xe_pxp_debugfs_register(struct xe_pxp *pxp);
+
+#endif /* __XE_PXP_DEBUGFS_H__ */
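
For orientation only, a minimal caller sketch (not part of this patch; the exact call site is an assumption) of how the hook above is expected to be wired into the driver's per-device debugfs setup:

/* Hypothetical caller; the surrounding registration function is assumed. */
void example_debugfs_register(struct xe_device *xe)
{
	/* ... other per-device debugfs registration ... */

	/* Silently does nothing when PXP is not enabled on this device. */
	xe_pxp_debugfs_register(xe->pxp);
}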
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
new file mode 100644
index 000000000000..d92ec0f515b0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2024 Intel Corporation.
+ */
+
+#include "xe_pxp_submit.h"
+
+#include <linux/delay.h>
+#include <uapi/drm/xe_drm.h>
+
+#include "xe_device_types.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_exec_queue.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_lrc.h"
+#include "xe_map.h"
+#include "xe_pxp.h"
+#include "xe_pxp_types.h"
+#include "xe_sched_job.h"
+#include "xe_vm.h"
+#include "abi/gsc_command_header_abi.h"
+#include "abi/gsc_pxp_commands_abi.h"
+#include "instructions/xe_gsc_commands.h"
+#include "instructions/xe_mfx_commands.h"
+#include "instructions/xe_mi_commands.h"
+
+/*
+ * The VCS is used for kernel-owned GGTT submissions to issue key termination.
+ * Terminations are serialized, so we only need a single queue and a single
+ * batch.
+ */
+static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
+{
+ struct xe_gt *gt = pxp->gt;
+ struct xe_device *xe = pxp->xe;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_hw_engine *hwe;
+ struct xe_exec_queue *q;
+ struct xe_bo *bo;
+ int err;
+
+ hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_VIDEO_DECODE, 0, true);
+ if (!hwe)
+ return -ENODEV;
+
+ q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, hwe,
+ EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_PERMANENT, 0);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
+
+ /*
+ * Each termination is 16 DWORDS, so 4K is enough to contain a
+ * termination for each session.
+ */
+ bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto out_queue;
+ }
+
+ pxp->vcs_exec.q = q;
+ pxp->vcs_exec.bo = bo;
+
+ return 0;
+
+out_queue:
+ xe_exec_queue_put(q);
+ return err;
+}
+
+static void destroy_vcs_execution_resources(struct xe_pxp *pxp)
+{
+ if (pxp->vcs_exec.bo)
+ xe_bo_unpin_map_no_vm(pxp->vcs_exec.bo);
+
+ if (pxp->vcs_exec.q)
+ xe_exec_queue_put(pxp->vcs_exec.q);
+}
+
+#define PXP_BB_SIZE XE_PAGE_SIZE
+static int allocate_gsc_client_resources(struct xe_gt *gt,
+ struct xe_pxp_gsc_client_resources *gsc_res,
+ size_t inout_size)
+{
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_hw_engine *hwe;
+ struct xe_vm *vm;
+ struct xe_bo *bo;
+ struct xe_exec_queue *q;
+ struct dma_fence *fence;
+ long timeout;
+ int err = 0;
+
+ hwe = xe_gt_hw_engine(gt, XE_ENGINE_CLASS_OTHER, 0, true);
+
+ /* we shouldn't reach here if the GSC engine is not available */
+ xe_assert(xe, hwe);
+
+ /* PXP instructions must be issued from PPGTT */
+ vm = xe_vm_create(xe, XE_VM_FLAG_GSC);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+ /* We allocate a single object for the batch and the in/out memory */
+ xe_vm_lock(vm, false);
+ bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC);
+ xe_vm_unlock(vm);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ goto vm_out;
+ }
+
+ fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto bo_out;
+ }
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ if (timeout <= 0) {
+ err = timeout ?: -ETIME;
+ goto bo_out;
+ }
+
+ q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1, hwe,
+ EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT, 0);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto bo_out;
+ }
+
+ gsc_res->vm = vm;
+ gsc_res->bo = bo;
+ gsc_res->inout_size = inout_size;
+ gsc_res->batch = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
+ gsc_res->msg_in = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE);
+ gsc_res->msg_out = IOSYS_MAP_INIT_OFFSET(&bo->vmap, PXP_BB_SIZE + inout_size);
+ gsc_res->q = q;
+
+ /* initialize host-session-handle (for all Xe-to-gsc-firmware PXP cmds) */
+ gsc_res->host_session_handle = xe_gsc_create_host_session_id();
+
+ return 0;
+
+bo_out:
+ xe_bo_unpin_map_no_vm(bo);
+vm_out:
+ xe_vm_close_and_put(vm);
+
+ return err;
+}
+
+static void destroy_gsc_client_resources(struct xe_pxp_gsc_client_resources *gsc_res)
+{
+ if (!gsc_res->q)
+ return;
+
+ xe_exec_queue_put(gsc_res->q);
+ xe_bo_unpin_map_no_vm(gsc_res->bo);
+ xe_vm_close_and_put(gsc_res->vm);
+}
+
+/**
+ * xe_pxp_allocate_execution_resources - Allocate PXP submission objects
+ * @pxp: the xe_pxp structure
+ *
+ * Allocates exec_queue objects for VCS and GSCCS submission. GSCCS
+ * submissions are done via PPGTT, so this function also allocates a VM and
+ * maps the submission object into it.
+ *
+ * Returns 0 if the allocation and mapping is successful, an errno value
+ * otherwise.
+ */
+int xe_pxp_allocate_execution_resources(struct xe_pxp *pxp)
+{
+ int err;
+
+ err = allocate_vcs_execution_resources(pxp);
+ if (err)
+ return err;
+
+ /*
+ * PXP commands can require a lot of BO space (see PXP_MAX_PACKET_SIZE),
+ * but we currently only support a subset of commands that are small
+ * (< 20 dwords), so a single page is enough for now.
+ */
+ err = allocate_gsc_client_resources(pxp->gt, &pxp->gsc_res, XE_PAGE_SIZE);
+ if (err)
+ goto destroy_vcs_context;
+
+ return 0;
+
+destroy_vcs_context:
+ destroy_vcs_execution_resources(pxp);
+ return err;
+}
+
+void xe_pxp_destroy_execution_resources(struct xe_pxp *pxp)
+{
+ destroy_gsc_client_resources(&pxp->gsc_res);
+ destroy_vcs_execution_resources(pxp);
+}
+
+#define emit_cmd(xe_, map_, offset_, val_) \
+ xe_map_wr(xe_, map_, (offset_) * sizeof(u32), u32, val_)
+
+/* stall until prior PXP and MFX/HCP/HUC objects are completed */
+#define MFX_WAIT_PXP (MFX_WAIT | \
+ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
+ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
+static u32 pxp_emit_wait(struct xe_device *xe, struct iosys_map *batch, u32 offset)
+{
+ /* wait for cmds to go through */
+ emit_cmd(xe, batch, offset++, MFX_WAIT_PXP);
+ emit_cmd(xe, batch, offset++, 0);
+
+ return offset;
+}
+
+static u32 pxp_emit_session_selection(struct xe_device *xe, struct iosys_map *batch,
+ u32 offset, u32 idx)
+{
+ offset = pxp_emit_wait(xe, batch, offset);
+
+ /* pxp off */
+ emit_cmd(xe, batch, offset++, MI_FLUSH_DW | MI_FLUSH_IMM_DW);
+ emit_cmd(xe, batch, offset++, 0);
+ emit_cmd(xe, batch, offset++, 0);
+ emit_cmd(xe, batch, offset++, 0);
+
+ /* select session */
+ emit_cmd(xe, batch, offset++, MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx));
+ emit_cmd(xe, batch, offset++, 0);
+
+ offset = pxp_emit_wait(xe, batch, offset);
+
+ /* pxp on */
+ emit_cmd(xe, batch, offset++, MI_FLUSH_DW |
+ MI_FLUSH_DW_PROTECTED_MEM_EN |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX |
+ MI_FLUSH_IMM_DW);
+ emit_cmd(xe, batch, offset++, LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR |
+ MI_FLUSH_DW_USE_GTT);
+ emit_cmd(xe, batch, offset++, 0);
+ emit_cmd(xe, batch, offset++, 0);
+
+ offset = pxp_emit_wait(xe, batch, offset);
+
+ return offset;
+}
+
+static u32 pxp_emit_inline_termination(struct xe_device *xe,
+ struct iosys_map *batch, u32 offset)
+{
+ /* session inline termination */
+ emit_cmd(xe, batch, offset++, CRYPTO_KEY_EXCHANGE);
+ emit_cmd(xe, batch, offset++, 0);
+
+ return offset;
+}
+
+static u32 pxp_emit_session_termination(struct xe_device *xe, struct iosys_map *batch,
+ u32 offset, u32 idx)
+{
+ offset = pxp_emit_session_selection(xe, batch, offset, idx);
+ offset = pxp_emit_inline_termination(xe, batch, offset);
+
+ return offset;
+}
+
+/**
+ * xe_pxp_submit_session_termination - submits a PXP inline termination
+ * @pxp: the xe_pxp structure
+ * @id: the session to terminate
+ *
+ * Emit an inline termination via the VCS engine to terminate a session.
+ *
+ * Returns 0 if the submission is successful, an errno value otherwise.
+ */
+int xe_pxp_submit_session_termination(struct xe_pxp *pxp, u32 id)
+{
+ struct xe_sched_job *job;
+ struct dma_fence *fence;
+ long timeout;
+ u32 offset = 0;
+ u64 addr = xe_bo_ggtt_addr(pxp->vcs_exec.bo);
+
+ offset = pxp_emit_session_termination(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, id);
+ offset = pxp_emit_wait(pxp->xe, &pxp->vcs_exec.bo->vmap, offset);
+ emit_cmd(pxp->xe, &pxp->vcs_exec.bo->vmap, offset, MI_BATCH_BUFFER_END);
+
+ job = xe_sched_job_create(pxp->vcs_exec.q, &addr);
+ if (IS_ERR(job))
+ return PTR_ERR(job);
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+
+ dma_fence_put(fence);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+ else if (timeout < 0)
+ return timeout;
+
+ return 0;
+}
+
+static bool
+is_fw_err_platform_config(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+static const char *
+fw_err_to_string(u32 type)
+{
+ switch (type) {
+ case PXP_STATUS_ERROR_API_VERSION:
+ return "ERR_API_VERSION";
+ case PXP_STATUS_NOT_READY:
+ return "ERR_NOT_READY";
+ case PXP_STATUS_PLATFCONFIG_KF1_NOVERIF:
+ case PXP_STATUS_PLATFCONFIG_KF1_BAD:
+ case PXP_STATUS_PLATFCONFIG_FIXED_KF1_NOT_SUPPORTED:
+ return "ERR_PLATFORM_CONFIG";
+ default:
+ break;
+ }
+ return NULL;
+}
+
+static int pxp_pkt_submit(struct xe_exec_queue *q, u64 batch_addr)
+{
+ struct xe_gt *gt = q->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_sched_job *job;
+ struct dma_fence *fence;
+ long timeout;
+
+ xe_assert(xe, q->hwe->engine_id == XE_HW_ENGINE_GSCCS0);
+
+ job = xe_sched_job_create(q, &batch_addr);
+ if (IS_ERR(job))
+ return PTR_ERR(job);
+
+ xe_sched_job_arm(job);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ timeout = dma_fence_wait_timeout(fence, false, HZ);
+ dma_fence_put(fence);
+ if (timeout < 0)
+ return timeout;
+ else if (!timeout)
+ return -ETIME;
+
+ return 0;
+}
+
+static void emit_pxp_heci_cmd(struct xe_device *xe, struct iosys_map *batch,
+ u64 addr_in, u32 size_in, u64 addr_out, u32 size_out)
+{
+ u32 len = 0;
+
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, GSC_HECI_CMD_PKT);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_in));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_in));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_in);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, lower_32_bits(addr_out));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, upper_32_bits(addr_out));
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, size_out);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, 0);
+ xe_map_wr(xe, batch, len++ * sizeof(u32), u32, MI_BATCH_BUFFER_END);
+}
+
+#define GSC_PENDING_RETRY_MAXCOUNT 40
+#define GSC_PENDING_RETRY_PAUSE_MS 50
+static int gsccs_send_message(struct xe_pxp_gsc_client_resources *gsc_res,
+ void *msg_in, size_t msg_in_size,
+ void *msg_out, size_t msg_out_size_max)
+{
+ struct xe_device *xe = gsc_res->vm->xe;
+ const size_t max_msg_size = gsc_res->inout_size - sizeof(struct intel_gsc_mtl_header);
+ u32 wr_offset;
+ u32 rd_offset;
+ u32 reply_size;
+ u32 min_reply_size = 0;
+ int ret;
+ int retry = GSC_PENDING_RETRY_MAXCOUNT;
+
+ if (msg_in_size > max_msg_size || msg_out_size_max > max_msg_size)
+ return -ENOSPC;
+
+ wr_offset = xe_gsc_emit_header(xe, &gsc_res->msg_in, 0,
+ HECI_MEADDRESS_PXP,
+ gsc_res->host_session_handle,
+ msg_in_size);
+
+ /* NOTE: zero size packets are used for session-cleanups */
+ if (msg_in && msg_in_size) {
+ xe_map_memcpy_to(xe, &gsc_res->msg_in, wr_offset,
+ msg_in, msg_in_size);
+ min_reply_size = sizeof(struct pxp_cmd_header);
+ }
+
+ /* Make sure the reply header does not contain stale data */
+ xe_gsc_poison_header(xe, &gsc_res->msg_out, 0);
+
+ /*
+ * The BO is mapped at address 0 of the PPGTT, so no need to add its
+ * base offset when calculating the in/out addresses.
+ */
+ emit_pxp_heci_cmd(xe, &gsc_res->batch, PXP_BB_SIZE,
+ wr_offset + msg_in_size, PXP_BB_SIZE + gsc_res->inout_size,
+ wr_offset + msg_out_size_max);
+
+ xe_device_wmb(xe);
+
+ /*
+ * If the GSC needs to communicate with CSME to complete our request,
+ * it'll set the "pending" flag in the return header. In this scenario
+ * we're expected to wait 50ms to give some time to the proxy code to
+ * handle the GSC<->CSME communication and then try again. Note that,
+ * although in most cases the 50ms window is enough, the proxy flow is
+ * not actually guaranteed to complete within that time period, so we
+ * might have to try multiple times, up to a worst case of 2 seconds,
+ * after which the request is considered aborted.
+ */
+ do {
+ ret = pxp_pkt_submit(gsc_res->q, 0);
+ if (ret)
+ break;
+
+ if (xe_gsc_check_and_update_pending(xe, &gsc_res->msg_in, 0,
+ &gsc_res->msg_out, 0)) {
+ ret = -EAGAIN;
+ msleep(GSC_PENDING_RETRY_PAUSE_MS);
+ }
+ } while (--retry && ret == -EAGAIN);
+
+ if (ret) {
+ drm_err(&xe->drm, "failed to submit GSC PXP message (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = xe_gsc_read_out_header(xe, &gsc_res->msg_out, 0,
+ min_reply_size, &rd_offset);
+ if (ret) {
+ drm_err(&xe->drm, "invalid GSC reply for PXP (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ if (msg_out && min_reply_size) {
+ reply_size = xe_map_rd_field(xe, &gsc_res->msg_out, rd_offset,
+ struct pxp_cmd_header, buffer_len);
+ reply_size += sizeof(struct pxp_cmd_header);
+
+ if (reply_size > msg_out_size_max) {
+ drm_warn(&xe->drm, "PXP reply size overflow: %u (%zu)\n",
+ reply_size, msg_out_size_max);
+ reply_size = msg_out_size_max;
+ }
+
+ xe_map_memcpy_from(xe, msg_out, &gsc_res->msg_out,
+ rd_offset, reply_size);
+ }
+
+ xe_gsc_poison_header(xe, &gsc_res->msg_in, 0);
+
+ return ret;
+}
+
+/**
+ * xe_pxp_submit_session_init - submits a PXP GSC session initialization
+ * @gsc_res: the pxp client resources
+ * @id: the session to initialize
+ *
+ * Submit a message to the GSC FW to initialize (i.e. start) a PXP session.
+ *
+ * Returns 0 if the submission is successful, an errno value otherwise.
+ */
+int xe_pxp_submit_session_init(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
+{
+ struct xe_device *xe = gsc_res->vm->xe;
+ struct pxp43_create_arb_in msg_in = {0};
+ struct pxp43_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP43_CMDID_INIT_SESSION;
+ msg_in.header.stream_id = (FIELD_PREP(PXP43_INIT_SESSION_APPID, id) |
+ FIELD_PREP(PXP43_INIT_SESSION_VALID, 1) |
+ FIELD_PREP(PXP43_INIT_SESSION_APPTYPE, 0));
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ if (id == DRM_XE_PXP_HWDRM_DEFAULT_SESSION)
+ msg_in.protection_mode = PXP43_INIT_SESSION_PROTECTION_ARB;
+
+ ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out));
+ if (ret) {
+ drm_err(&xe->drm, "Failed to init PXP session %u (%pe)\n", id, ERR_PTR(ret));
+ } else if (msg_out.header.status != 0) {
+ ret = -EIO;
+
+ if (is_fw_err_platform_config(msg_out.header.status))
+ drm_info_once(&xe->drm,
+ "Failed to init PXP session %u due to BIOS/SOC, s=0x%x(%s)\n",
+ id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ else
+ drm_dbg(&xe->drm, "Failed to init PXP session %u, s=0x%x\n",
+ id, msg_out.header.status);
+ }
+
+ return ret;
+}
+
+/**
+ * xe_pxp_submit_session_invalidation - submits a PXP GSC invalidation
+ * @gsc_res: the pxp client resources
+ * @id: the session to invalidate
+ *
+ * Submit a message to the GSC FW to notify it that a session has been
+ * terminated and is therefore invalid.
+ *
+ * Returns 0 if the submission is successful, an errno value otherwise.
+ */
+int xe_pxp_submit_session_invalidation(struct xe_pxp_gsc_client_resources *gsc_res, u32 id)
+{
+ struct xe_device *xe = gsc_res->vm->xe;
+ struct pxp43_inv_stream_key_in msg_in = {0};
+ struct pxp43_inv_stream_key_out msg_out = {0};
+ int ret = 0;
+
+ /*
+ * Stream key invalidation reuses the same version 4.2 input/output
+ * command format, but the firmware requires 4.3 API interaction
+ */
+ msg_in.header.api_version = PXP_APIVER(4, 3);
+ msg_in.header.command_id = PXP43_CMDID_INVALIDATE_STREAM_KEY;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+
+ msg_in.header.stream_id = FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_VALID, 1);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_APP_TYPE, 0);
+ msg_in.header.stream_id |= FIELD_PREP(PXP_CMDHDR_EXTDATA_SESSION_ID, id);
+
+ ret = gsccs_send_message(gsc_res, &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out));
+ if (ret) {
+ drm_err(&xe->drm, "Failed to invalidate PXP stream-key %u (%pe)\n",
+ id, ERR_PTR(ret));
+ } else if (msg_out.header.status != 0) {
+ ret = -EIO;
+
+ if (is_fw_err_platform_config(msg_out.header.status))
+ drm_info_once(&xe->drm,
+ "Failed to invalidate PXP stream-key %u: BIOS/SOC 0x%08x(%s)\n",
+ id, msg_out.header.status,
+ fw_err_to_string(msg_out.header.status));
+ else
+ drm_dbg(&xe->drm, "Failed to invalidate stream-key %u, s=0x%08x\n",
+ id, msg_out.header.status);
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.h b/drivers/gpu/drm/xe/xe_pxp_submit.h
new file mode 100644
index 000000000000..c9efda02f4b0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_SUBMIT_H__
+#define __XE_PXP_SUBMIT_H__
+
+#include <linux/types.h>
+
+struct xe_pxp;
+struct xe_pxp_gsc_client_resources;
+
+int xe_pxp_allocate_execution_resources(struct xe_pxp *pxp);
+void xe_pxp_destroy_execution_resources(struct xe_pxp *pxp);
+
+int xe_pxp_submit_session_init(struct xe_pxp_gsc_client_resources *gsc_res, u32 id);
+int xe_pxp_submit_session_termination(struct xe_pxp *pxp, u32 id);
+int xe_pxp_submit_session_invalidation(struct xe_pxp_gsc_client_resources *gsc_res,
+ u32 id);
+
+#endif /* __XE_PXP_SUBMIT_H__ */
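
For reference, a minimal sketch of how these three submission helpers compose into the termination flow described by the kernel-doc above. This is illustrative only: error handling, locking and the interrupt wait are omitted, the sequencing is an assumption, and the real state machine lives elsewhere in the PXP code.

static int example_terminate_and_restart(struct xe_pxp *pxp, u32 id)
{
	int err;

	/* Inline termination on the VCS, serialized via the kernel queue. */
	err = xe_pxp_submit_session_termination(pxp, id);
	if (err)
		return err;

	/* ... wait for the KCR "session terminated" interrupt here ... */

	/* Tell the GSC FW the old stream key is gone. */
	err = xe_pxp_submit_session_invalidation(&pxp->gsc_res, id);
	if (err)
		return err;

	/* Start a fresh session with the same id. */
	return xe_pxp_submit_session_init(&pxp->gsc_res, id);
}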
diff --git a/drivers/gpu/drm/xe/xe_pxp_types.h b/drivers/gpu/drm/xe/xe_pxp_types.h
new file mode 100644
index 000000000000..53e9d48d10fb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pxp_types.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2024, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __XE_PXP_TYPES_H__
+#define __XE_PXP_TYPES_H__
+
+#include <linux/completion.h>
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct xe_bo;
+struct xe_exec_queue;
+struct xe_device;
+struct xe_gt;
+struct xe_vm;
+
+enum xe_pxp_status {
+ XE_PXP_ERROR = -1,
+ XE_PXP_NEEDS_TERMINATION = 0, /* starting status */
+ XE_PXP_NEEDS_ADDITIONAL_TERMINATION,
+ XE_PXP_TERMINATION_IN_PROGRESS,
+ XE_PXP_READY_TO_START,
+ XE_PXP_START_IN_PROGRESS,
+ XE_PXP_ACTIVE,
+ XE_PXP_SUSPENDED,
+};
+
+/**
+ * struct xe_pxp_gsc_client_resources - resources for GSC submission by a PXP
+ * client. The GSC FW supports multiple GSC clients active at the same time.
+ */
+struct xe_pxp_gsc_client_resources {
+ /**
+ * @host_session_handle: handle used to identify the client in messages
+ * sent to the GSC firmware.
+ */
+ u64 host_session_handle;
+ /** @vm: VM used for PXP submissions to the GSCCS */
+ struct xe_vm *vm;
+ /** @q: GSCCS exec queue for PXP submissions */
+ struct xe_exec_queue *q;
+
+ /**
+ * @bo: BO used for submissions to the GSCCS and GSC FW. It includes
+ * space for the GSCCS batch and the input/output buffers read/written
+ * by the FW
+ */
+ struct xe_bo *bo;
+ /** @inout_size: size of each of the msg_in/out sections individually */
+ u32 inout_size;
+ /** @batch: iosys_map to the batch memory within the BO */
+ struct iosys_map batch;
+ /** @msg_in: iosys_map to the input memory within the BO */
+ struct iosys_map msg_in;
+ /** @msg_out: iosys_map to the output memory within the BO */
+ struct iosys_map msg_out;
+};
+
+/**
+ * struct xe_pxp - pxp state
+ */
+struct xe_pxp {
+ /** @xe: Backpointer to the xe_device struct */
+ struct xe_device *xe;
+
+ /**
+ * @gt: pointer to the gt that owns the submission-side of PXP
+ * (VDBOX, KCR and GSC)
+ */
+ struct xe_gt *gt;
+
+ /** @vcs_exec: kernel-owned objects for PXP submissions to the VCS */
+ struct {
+ /** @vcs_exec.q: kernel-owned VCS exec queue used for PXP terminations */
+ struct xe_exec_queue *q;
+ /** @vcs_exec.bo: BO used for submissions to the VCS */
+ struct xe_bo *bo;
+ } vcs_exec;
+
+ /** @gsc_res: kernel-owned objects for PXP submissions to the GSCCS */
+ struct xe_pxp_gsc_client_resources gsc_res;
+
+ /** @irq: wrapper for the worker and queue used for PXP irq support */
+ struct {
+ /** @irq.work: worker that manages irq events. */
+ struct work_struct work;
+ /** @irq.wq: workqueue on which to queue the irq work. */
+ struct workqueue_struct *wq;
+ /** @irq.events: pending events, protected with xe->irq.lock. */
+ u32 events;
+#define PXP_TERMINATION_REQUEST BIT(0)
+#define PXP_TERMINATION_COMPLETE BIT(1)
+ } irq;
+
+ /** @mutex: protects the pxp status and the queue list */
+ struct mutex mutex;
+ /** @status: the current pxp status */
+ enum xe_pxp_status status;
+ /** @activation: completion struct that tracks pxp start */
+ struct completion activation;
+ /** @termination: completion struct that tracks terminations */
+ struct completion termination;
+
+ /** @queues: management of exec_queues that use PXP */
+ struct {
+ /** @queues.lock: spinlock protecting the queue management */
+ spinlock_t lock;
+ /** @queues.list: list of exec_queues that use PXP */
+ struct list_head list;
+ } queues;
+
+ /**
+ * @key_instance: keep track of the current iteration of the PXP key.
+ * Note that, due to the time needed for PXP termination and re-start
+ * to complete, the minimum time between 2 subsequent increases of this
+ * variable is 50ms, and even that only if there is a continuous attack;
+ * normal behavior is for this to increase much more slowly than that.
+ * This means that we don't expect this to ever wrap and don't implement
+ * that case in the code.
+ */
+ u32 key_instance;
+ /**
+ * @last_suspend_key_instance: value of key_instance at the last
+ * suspend. Used to check if any PXP session has been created between
+ * suspend cycles.
+ */
+ u32 last_suspend_key_instance;
+};
+
+#endif /* __XE_PXP_TYPES_H__ */
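
The struct documentation above encodes two locking domains. A small illustrative sketch (hypothetical helper, not part of this patch): @irq.events is only touched under xe->irq.lock, while @status transitions happen under pxp->mutex.

static void example_request_termination(struct xe_pxp *pxp)
{
	/* Event bits are protected by the device irq lock. */
	spin_lock_irq(&pxp->xe->irq.lock);
	pxp->irq.events |= PXP_TERMINATION_REQUEST;
	spin_unlock_irq(&pxp->xe->irq.lock);

	queue_work(pxp->irq.wq, &pxp->irq.work);

	/* The status field is only ever read/written under pxp->mutex. */
	mutex_lock(&pxp->mutex);
	pxp->status = XE_PXP_NEEDS_TERMINATION;
	mutex_unlock(&pxp->mutex);
}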
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 848da8e68c7a..2dbf4066d86f 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -9,12 +9,14 @@
#include <linux/sched/clock.h>
#include <drm/ttm/ttm_placement.h>
+#include <generated/xe_wa_oob.h>
#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
@@ -22,7 +24,10 @@
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
+#include "xe_oa.h"
+#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_wa.h"
static const u16 xe_to_user_engine_class[] = {
[XE_ENGINE_CLASS_RENDER] = DRM_XE_ENGINE_CLASS_RENDER,
@@ -83,24 +88,22 @@ static __ktime_func_t __clock_id_to_func(clockid_t clk_id)
}
static void
-__read_timestamps(struct xe_gt *gt,
- struct xe_reg lower_reg,
- struct xe_reg upper_reg,
- u64 *engine_ts,
- u64 *cpu_ts,
- u64 *cpu_delta,
- __ktime_func_t cpu_clock)
+hwe_read_timestamp(struct xe_hw_engine *hwe, u64 *engine_ts, u64 *cpu_ts,
+ u64 *cpu_delta, __ktime_func_t cpu_clock)
{
+ struct xe_mmio *mmio = &hwe->gt->mmio;
u32 upper, lower, old_upper, loop = 0;
+ struct xe_reg upper_reg = RING_TIMESTAMP_UDW(hwe->mmio_base),
+ lower_reg = RING_TIMESTAMP(hwe->mmio_base);
- upper = xe_mmio_read32(gt, upper_reg);
+ upper = xe_mmio_read32(mmio, upper_reg);
do {
*cpu_delta = local_clock();
*cpu_ts = cpu_clock();
- lower = xe_mmio_read32(gt, lower_reg);
+ lower = xe_mmio_read32(mmio, lower_reg);
*cpu_delta = local_clock() - *cpu_delta;
old_upper = upper;
- upper = xe_mmio_read32(gt, upper_reg);
+ upper = xe_mmio_read32(mmio, upper_reg);
} while (upper != old_upper && loop++ < 2);
*engine_ts = (u64)upper << 32 | lower;
@@ -117,6 +120,10 @@ query_engine_cycles(struct xe_device *xe,
__ktime_func_t cpu_clock;
struct xe_hw_engine *hwe;
struct xe_gt *gt;
+ unsigned int fw_ref;
+
+ if (IS_SRIOV_VF(xe))
+ return -EOPNOTSUPP;
if (query->size == 0) {
query->size = size;
@@ -149,18 +156,16 @@ query_engine_cycles(struct xe_device *xe,
if (!hwe)
return -EINVAL;
- if (xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL))
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return -EIO;
+ }
- __read_timestamps(gt,
- RING_TIMESTAMP(hwe->mmio_base),
- RING_TIMESTAMP_UDW(hwe->mmio_base),
- &resp.engine_cycles,
- &resp.cpu_timestamp,
- &resp.cpu_delta,
- cpu_clock);
+ hwe_read_timestamp(hwe, &resp.engine_cycles, &resp.cpu_timestamp,
+ &resp.cpu_delta, cpu_clock);
- xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
if (GRAPHICS_VER(xe) >= 20)
resp.width = 64;
@@ -168,16 +173,10 @@ query_engine_cycles(struct xe_device *xe,
resp.width = 36;
/* Only write to the output fields of user query */
- if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))
- return -EFAULT;
-
- if (put_user(resp.cpu_delta, &query_ptr->cpu_delta))
- return -EFAULT;
-
- if (put_user(resp.engine_cycles, &query_ptr->engine_cycles))
- return -EFAULT;
-
- if (put_user(resp.width, &query_ptr->width))
+ if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp) ||
+ put_user(resp.cpu_delta, &query_ptr->cpu_delta) ||
+ put_user(resp.engine_cycles, &query_ptr->engine_cycles) ||
+ put_user(resp.width, &query_ptr->width))
return -EFAULT;
return 0;
@@ -339,8 +338,13 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
xe->info.devid | (xe->info.revid << 16);
if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
- config->info[DRM_XE_QUERY_CONFIG_FLAGS] =
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
+ if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+ DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR;
+ config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
+ DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY;
config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT] =
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;
config->info[DRM_XE_QUERY_CONFIG_VA_BITS] = xe->info.va_bits;
@@ -458,12 +462,23 @@ static int query_hwconfig(struct xe_device *xe,
static size_t calc_topo_query_size(struct xe_device *xe)
{
- return xe->info.gt_count *
- (4 * sizeof(struct drm_xe_query_topology_mask) +
- sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
- sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
- sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask) +
- sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss));
+ struct xe_gt *gt;
+ size_t query_size = 0;
+ int id;
+
+ for_each_gt(gt, xe, id) {
+ query_size += 3 * sizeof(struct drm_xe_query_topology_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.g_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.c_dss_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
+
+ /* L3bank mask may not be available for some GTs */
+ if (!XE_WA(gt, no_media_l3))
+ query_size += sizeof(struct drm_xe_query_topology_mask) +
+ sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
+ }
+
+ return query_size;
}
static int copy_mask(void __user **ptr,
@@ -516,11 +531,18 @@ static int query_gt_topology(struct xe_device *xe,
if (err)
return err;
- topo.type = DRM_XE_TOPO_L3_BANK;
- err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
- sizeof(gt->fuse_topo.l3_bank_mask));
- if (err)
- return err;
+ /*
+ * If the kernel doesn't have a way to obtain a correct L3bank
+ * mask, then it's better to omit L3 from the query rather than
+ * reporting bogus or zeroed information to userspace.
+ */
+ if (!XE_WA(gt, no_media_l3)) {
+ topo.type = DRM_XE_TOPO_L3_BANK;
+ err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
+ sizeof(gt->fuse_topo.l3_bank_mask));
+ if (err)
+ return err;
+ }
topo.type = gt->fuse_topo.eu_type == XE_GT_EU_TYPE_SIMD16 ?
DRM_XE_TOPO_SIMD16_EU_PER_DSS :
@@ -659,7 +681,9 @@ static int query_oa_units(struct xe_device *xe,
du->oa_unit_id = u->oa_unit_id;
du->oa_unit_type = u->type;
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
- du->capabilities = DRM_XE_OA_CAPS_BASE;
+ du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
+ DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
@@ -684,6 +708,74 @@ static int query_oa_units(struct xe_device *xe,
return ret ? -EFAULT : 0;
}
+static int query_pxp_status(struct xe_device *xe, struct drm_xe_device_query *query)
+{
+ struct drm_xe_query_pxp_status __user *query_ptr = u64_to_user_ptr(query->data);
+ size_t size = sizeof(struct drm_xe_query_pxp_status);
+ struct drm_xe_query_pxp_status resp = { 0 };
+ int ret;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ ret = xe_pxp_get_readiness_status(xe->pxp);
+ if (ret < 0)
+ return ret;
+
+ resp.status = ret;
+ resp.supported_session_types = BIT(DRM_XE_PXP_TYPE_HWDRM);
+
+ if (copy_to_user(query_ptr, &resp, size))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int query_eu_stall(struct xe_device *xe,
+ struct drm_xe_device_query *query)
+{
+ void __user *query_ptr = u64_to_user_ptr(query->data);
+ struct drm_xe_query_eu_stall *info;
+ size_t size, array_size;
+ const u64 *rates;
+ u32 num_rates;
+ int ret;
+
+ if (!xe_eu_stall_supported_on_platform(xe)) {
+ drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ return -ENODEV;
+ }
+
+ array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
+ size = sizeof(struct drm_xe_query_eu_stall) + array_size;
+
+ if (query->size == 0) {
+ query->size = size;
+ return 0;
+ } else if (XE_IOCTL_DBG(xe, query->size != size)) {
+ return -EINVAL;
+ }
+
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->num_sampling_rates = num_rates;
+ info->capabilities = DRM_XE_EU_STALL_CAPS_BASE;
+ info->record_size = xe_eu_stall_data_record_size(xe);
+ info->per_xecore_buf_size = xe_eu_stall_get_per_xecore_buf_size();
+ memcpy(info->sampling_rates, rates, array_size);
+
+ ret = copy_to_user(query_ptr, info, size);
+ kfree(info);
+
+ return ret ? -EFAULT : 0;
+}
+
static int (* const xe_query_funcs[])(struct xe_device *xe,
struct drm_xe_device_query *query) = {
query_engines,
@@ -695,6 +787,8 @@ static int (* const xe_query_funcs[])(struct xe_device *xe,
query_engine_cycles,
query_uc_fw_version,
query_oa_units,
+ query_pxp_status,
+ query_eu_stall,
};
int xe_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
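The new PXP status and EU stall queries follow the driver's usual two-call convention: a first call with size == 0 reports the required buffer size, and a second call with exactly that size copies the payload. A hedged userspace sketch (uapi names are taken from xe_drm.h as an assumption; error-path polish omitted):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int example_query_pxp_status(int fd, struct drm_xe_query_pxp_status *out)
{
	struct drm_xe_device_query q = {
		.query = DRM_XE_DEVICE_QUERY_PXP_STATUS,
	};

	/* First pass: size == 0, the kernel fills in q.size. */
	if (drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q))
		return -1;

	/* Second pass: provide a buffer of exactly q.size bytes. */
	q.data = (uintptr_t)out;
	return drmIoctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &q);
}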
diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
index 440ac572f6e5..fc8447a838c4 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr.c
+++ b/drivers/gpu/drm/xe/xe_reg_sr.c
@@ -15,6 +15,7 @@
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
+#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
@@ -23,49 +24,29 @@
#include "xe_hw_engine_types.h"
#include "xe_macros.h"
#include "xe_mmio.h"
-#include "xe_reg_whitelist.h"
#include "xe_rtp_types.h"
-#define XE_REG_SR_GROW_STEP_DEFAULT 16
-
static void reg_sr_fini(struct drm_device *drm, void *arg)
{
struct xe_reg_sr *sr = arg;
+ struct xe_reg_sr_entry *entry;
+ unsigned long reg;
+
+ xa_for_each(&sr->xa, reg, entry)
+ kfree(entry);
xa_destroy(&sr->xa);
- kfree(sr->pool.arr);
- memset(&sr->pool, 0, sizeof(sr->pool));
}
int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
{
xa_init(&sr->xa);
- memset(&sr->pool, 0, sizeof(sr->pool));
- sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
sr->name = name;
return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_reg_sr_init);
-static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
-{
- if (sr->pool.used == sr->pool.allocated) {
- struct xe_reg_sr_entry *arr;
-
- arr = krealloc_array(sr->pool.arr,
- ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
- sizeof(*arr), GFP_KERNEL);
- if (!arr)
- return NULL;
-
- sr->pool.arr = arr;
- sr->pool.allocated += sr->pool.grow_step;
- }
-
- return &sr->pool.arr[sr->pool.used++];
-}
-
static bool compatible_entries(const struct xe_reg_sr_entry *e1,
const struct xe_reg_sr_entry *e2)
{
@@ -111,7 +92,7 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
return 0;
}
- pentry = alloc_entry(sr);
+ pentry = kmalloc(sizeof(*pentry), GFP_KERNEL);
if (!pentry) {
ret = -ENOMEM;
goto fail;
@@ -164,7 +145,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
else if (entry->clr_bits + 1)
val = (reg.mcr ?
xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
- xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
+ xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
else
val = 0;
@@ -180,86 +161,37 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
if (entry->reg.mcr)
xe_gt_mcr_multicast_write(gt, reg_mcr, val);
else
- xe_mmio_write32(gt, reg, val);
+ xe_mmio_write32(&gt->mmio, reg, val);
}
void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
{
struct xe_reg_sr_entry *entry;
unsigned long reg;
- int err;
+ unsigned int fw_ref;
if (xa_empty(&sr->xa))
return;
+ if (IS_SRIOV_VF(gt_to_xe(gt)))
+ return;
+
xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name);
- err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- if (err)
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
goto err_force_wake;
xa_for_each(&sr->xa, reg, entry)
apply_one_mmio(gt, entry);
- err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
-
- return;
-
-err_force_wake:
- xe_gt_err(gt, "Failed to apply, err=%d\n", err);
-}
-
-void xe_reg_sr_apply_whitelist(struct xe_hw_engine *hwe)
-{
- struct xe_reg_sr *sr = &hwe->reg_whitelist;
- struct xe_gt *gt = hwe->gt;
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_reg_sr_entry *entry;
- struct drm_printer p;
- u32 mmio_base = hwe->mmio_base;
- unsigned long reg;
- unsigned int slot = 0;
- int err;
-
- if (xa_empty(&sr->xa))
- return;
-
- drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);
-
- err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- if (err)
- goto err_force_wake;
-
- p = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
- xa_for_each(&sr->xa, reg, entry) {
- if (slot == RING_MAX_NONPRIV_SLOTS) {
- xe_gt_err(gt,
- "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
- hwe->name, RING_MAX_NONPRIV_SLOTS);
- break;
- }
-
- xe_reg_whitelist_print_entry(&p, 0, reg, entry);
- xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot),
- reg | entry->set_bits);
- slot++;
- }
-
- /* And clear the rest just in case of garbage */
- for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) {
- u32 addr = RING_NOPID(mmio_base).addr;
-
- xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
- }
-
- err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
- XE_WARN_ON(err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
return;
err_force_wake:
- drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_gt_err(gt, "Failed to apply, err=-ETIMEDOUT\n");
}
/**
diff --git a/drivers/gpu/drm/xe/xe_reg_sr_types.h b/drivers/gpu/drm/xe/xe_reg_sr_types.h
index ad48a52b824a..ebe11f237fa2 100644
--- a/drivers/gpu/drm/xe/xe_reg_sr_types.h
+++ b/drivers/gpu/drm/xe/xe_reg_sr_types.h
@@ -20,12 +20,6 @@ struct xe_reg_sr_entry {
};
struct xe_reg_sr {
- struct {
- struct xe_reg_sr_entry *arr;
- unsigned int used;
- unsigned int allocated;
- unsigned int grow_step;
- } pool;
struct xarray xa;
const char *name;
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index 3996934974fa..23f6c81d9994 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -10,7 +10,9 @@
#include "regs/xe_oa_regs.h"
#include "regs/xe_regs.h"
#include "xe_gt_types.h"
+#include "xe_gt_printk.h"
#include "xe_platform_types.h"
+#include "xe_reg_sr.h"
#include "xe_rtp.h"
#include "xe_step.h"
@@ -86,9 +88,42 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
- {}
};
+static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
+{
+ struct xe_reg_sr *sr = &hwe->reg_whitelist;
+ struct xe_reg_sr_entry *entry;
+ struct drm_printer p;
+ unsigned long reg;
+ unsigned int slot;
+
+ xe_gt_dbg(hwe->gt, "Add %s whitelist to engine\n", sr->name);
+ p = xe_gt_dbg_printer(hwe->gt);
+
+ slot = 0;
+ xa_for_each(&sr->xa, reg, entry) {
+ struct xe_reg_sr_entry hwe_entry = {
+ .reg = RING_FORCE_TO_NONPRIV(hwe->mmio_base, slot),
+ .set_bits = entry->reg.addr | entry->set_bits,
+ .clr_bits = ~0u,
+ .read_mask = entry->read_mask,
+ };
+
+ if (slot == RING_MAX_NONPRIV_SLOTS) {
+ xe_gt_err(hwe->gt,
+ "hwe %s: maximum register whitelist slots (%d) reached, refusing to add more\n",
+ hwe->name, RING_MAX_NONPRIV_SLOTS);
+ break;
+ }
+
+ xe_reg_whitelist_print_entry(&p, 0, reg, entry);
+ xe_reg_sr_add(&hwe->reg_sr, &hwe_entry, hwe->gt);
+
+ slot++;
+ }
+}
+
/**
* xe_reg_whitelist_process_engine - process table of registers to whitelist
* @hwe: engine instance to process whitelist for
@@ -101,7 +136,9 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist);
+ xe_rtp_process_to_sr(&ctx, register_whitelist, ARRAY_SIZE(register_whitelist),
+ &hwe->reg_whitelist);
+ whitelist_apply_to_hwe(hwe);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index dca374b6521c..d1a403cfb628 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_resource.h>
@@ -34,17 +35,38 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_macros.h"
+#include "xe_svm.h"
#include "xe_ttm_vram_mgr.h"
-/* state back for walking over vram_mgr, stolen_mgr, and gtt_mgr allocations */
+/**
+ * struct xe_res_cursor - state for walking over dma mapping, vram_mgr,
+ * stolen_mgr, and gtt_mgr allocations
+ */
struct xe_res_cursor {
+ /** @start: Start of cursor */
u64 start;
+ /** @size: Size of the current segment. */
u64 size;
+ /** @remaining: Remaining bytes in cursor */
u64 remaining;
+ /** @node: Opaque pointer to the current node */
void *node;
+ /** @mem_type: Memory type */
u32 mem_type;
+ /** @sgl: Scatterlist for cursor */
struct scatterlist *sgl;
+ /** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
+ const struct drm_pagemap_device_addr *dma_addr;
+ /** @mm: Buddy allocator for VRAM cursor */
struct drm_buddy *mm;
+ /**
+ * @dma_start: DMA start address for the current segment.
+ * This may be different to @dma_addr.addr since elements in
+ * the array may be coalesced to a single segment.
+ */
+ u64 dma_start;
+ /** @dma_seg_size: Size of the current DMA segment. */
+ u64 dma_seg_size;
};
static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res)
@@ -70,6 +92,7 @@ static inline void xe_res_first(struct ttm_resource *res,
struct xe_res_cursor *cur)
{
cur->sgl = NULL;
+ cur->dma_addr = NULL;
if (!res)
goto fallback;
@@ -142,6 +165,36 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
}
/**
+ * __xe_res_dma_next() - Advance the cursor when end-of-segment is reached
+ * @cur: The cursor
+ */
+static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
+{
+ const struct drm_pagemap_device_addr *addr = cur->dma_addr;
+ u64 start = cur->start;
+
+ while (start >= cur->dma_seg_size) {
+ start -= cur->dma_seg_size;
+ addr++;
+ cur->dma_seg_size = PAGE_SIZE << addr->order;
+ }
+ cur->dma_start = addr->addr;
+
+ /* Coalesce array elements */
+ while (cur->dma_seg_size - start < cur->remaining) {
+ if (cur->dma_start + cur->dma_seg_size != addr[1].addr ||
+ addr->proto != addr[1].proto)
+ break;
+ addr++;
+ cur->dma_seg_size += PAGE_SIZE << addr->order;
+ }
+
+ cur->dma_addr = addr;
+ cur->start = start;
+ cur->size = cur->dma_seg_size - start;
+}
+
+/**
* xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
*
* @sg: scatter gather table to walk
@@ -160,12 +213,43 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
cur->start = start;
cur->remaining = size;
cur->size = 0;
+ cur->dma_addr = NULL;
cur->sgl = sg->sgl;
cur->mem_type = XE_PL_TT;
__xe_res_sg_next(cur);
}
/**
+ * xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
+ *
+ * @dma_addr: struct drm_pagemap_device_addr array to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations between @start and @size.
+ */
+static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
+ u64 start, u64 size,
+ struct xe_res_cursor *cur)
+{
+ XE_WARN_ON(!dma_addr);
+ XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
+ !IS_ALIGNED(size, PAGE_SIZE));
+
+ cur->node = NULL;
+ cur->start = start;
+ cur->remaining = size;
+ cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
+ cur->dma_start = 0;
+ cur->size = 0;
+ cur->dma_addr = dma_addr;
+ __xe_res_dma_next(cur);
+ cur->sgl = NULL;
+ cur->mem_type = XE_PL_TT;
+}
+
+/**
* xe_res_next - advance the cursor
*
* @cur: the cursor to advance
@@ -191,6 +275,12 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
return;
}
+ if (cur->dma_addr) {
+ cur->start += size;
+ __xe_res_dma_next(cur);
+ return;
+ }
+
if (cur->sgl) {
cur->start += size;
__xe_res_sg_next(cur);
@@ -232,6 +322,35 @@ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
*/
static inline u64 xe_res_dma(const struct xe_res_cursor *cur)
{
- return cur->sgl ? sg_dma_address(cur->sgl) + cur->start : cur->start;
+ if (cur->dma_addr)
+ return cur->dma_start + cur->start;
+ else if (cur->sgl)
+ return sg_dma_address(cur->sgl) + cur->start;
+ else
+ return cur->start;
+}
+
+/**
+ * xe_res_is_vram() - Whether the cursor current dma address points to
+ * same-device VRAM
+ * @cur: The cursor.
+ *
+ * Return: true iff the address returned by xe_res_dma() points to internal vram.
+ */
+static inline bool xe_res_is_vram(const struct xe_res_cursor *cur)
+{
+ if (cur->dma_addr)
+ return cur->dma_addr->proto == XE_INTERCONNECT_VRAM;
+
+ switch (cur->mem_type) {
+ case XE_PL_STOLEN:
+ case XE_PL_VRAM0:
+ case XE_PL_VRAM1:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
}
#endif
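A minimal sketch of how a caller walks a range with this cursor, including the new drm_pagemap_device_addr path (hypothetical helper; PTE programming is elided and left as a comment):

static void example_walk_dma(const struct drm_pagemap_device_addr *addrs,
			     u64 offset, u64 length)
{
	struct xe_res_cursor cur;

	/* offset/length must be page aligned, as asserted by xe_res_first_dma(). */
	xe_res_first_dma(addrs, offset, length, &cur);
	while (cur.remaining) {
		u64 addr = xe_res_dma(&cur);		/* device address of this segment */
		bool is_vram = xe_res_is_vram(&cur);	/* same-device VRAM vs system memory */

		/* ... emit PTEs covering [addr, addr + cur.size) ... */
		(void)addr;
		(void)is_vram;

		xe_res_next(&cur, cur.size);
	}
}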
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 0be4f489d3e1..bc1689db4cd7 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -90,11 +90,10 @@ static int emit_flush_dw(u32 *dw, int i)
return i;
}
-static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
- u32 *dw, int i)
+static int emit_flush_imm_ggtt(u32 addr, u32 value, u32 flags, u32 *dw, int i)
{
dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
- (invalidate_tlb ? MI_INVALIDATE_TLB : 0);
+ flags;
dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
dw[i++] = value;
@@ -111,16 +110,13 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
+static int emit_flush_invalidate(u32 *dw, int i)
{
- dw[i] = MI_FLUSH_DW;
- dw[i] |= flag;
- dw[i++] |= MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW |
- MI_FLUSH_DW_STORE_INDEX;
-
- dw[i++] = LRC_PPHWSP_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX;
+ dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR;
+ dw[i++] = 0;
dw[i++] = 0;
- dw[i++] = ~0U;
return i;
}
@@ -141,7 +137,8 @@ emit_pipe_control(u32 *dw, int i, u32 bit_group_0, u32 bit_group_1, u32 offset,
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
int i)
{
- u32 flags = PIPE_CONTROL_CS_STALL |
+ u32 flags0 = 0;
+ u32 flags1 = PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
@@ -152,11 +149,15 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
PIPE_CONTROL_STORE_DATA_INDEX;
if (invalidate_tlb)
- flags |= PIPE_CONTROL_TLB_INVALIDATE;
+ flags1 |= PIPE_CONTROL_TLB_INVALIDATE;
- flags &= ~mask_flags;
+ flags1 &= ~mask_flags;
- return emit_pipe_control(dw, i, 0, flags, LRC_PPHWSP_SCRATCH_ADDR, 0);
+ if (flags1 & PIPE_CONTROL_VF_CACHE_INVALIDATE)
+ flags0 |= PIPE_CONTROL0_L3_READ_ONLY_CACHE_INVALIDATE;
+
+ return emit_pipe_control(dw, i, flags0, flags1,
+ LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
}
static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
@@ -177,6 +178,10 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 flags;
+ if (XE_WA(gt, 14016712196))
+ i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
+ LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
+
flags = (PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_TILE_CACHE_FLUSH |
PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
@@ -221,18 +226,18 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
- return job->q->vm ? BIT(8) : 0;
+ if (job->q->vm && !job->ggtt)
+ return BIT(8);
+
+ return 0;
}
static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
{
- dw[i++] = MI_COPY_MEM_MEM | MI_COPY_MEM_MEM_SRC_GGTT |
- MI_COPY_MEM_MEM_DST_GGTT;
+ dw[i++] = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
+ dw[i++] = RING_CTX_TIMESTAMP(0).addr;
dw[i++] = xe_lrc_ctx_job_timestamp_ggtt_addr(lrc);
dw[i++] = 0;
- dw[i++] = xe_lrc_ctx_timestamp_ggtt_addr(lrc);
- dw[i++] = 0;
- dw[i++] = MI_NOOP;
return i;
}
@@ -250,7 +255,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
if (job->ring_ops_flush_tlb) {
dw[i++] = preparser_disable(true);
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, true, dw, i);
+ seqno, MI_INVALIDATE_TLB, dw, i);
dw[i++] = preparser_disable(false);
} else {
i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
@@ -266,7 +271,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
dw, i);
}
- i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);
i = emit_user_interrupt(dw, i);
@@ -312,7 +317,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
if (job->ring_ops_flush_tlb)
i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, true, dw, i);
+ seqno, MI_INVALIDATE_TLB, dw, i);
dw[i++] = preparser_disable(false);
@@ -329,7 +334,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
dw, i);
}
- i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i);
i = emit_user_interrupt(dw, i);
@@ -406,7 +411,7 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
/* XXX: Do we need this? Leaving for now. */
dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(0, dw, i);
+ i = emit_flush_invalidate(dw, i);
dw[i++] = preparser_disable(false);
}
diff --git a/drivers/gpu/drm/xe/xe_ring_ops_types.h b/drivers/gpu/drm/xe/xe_ring_ops_types.h
index 1ae56e2ee7b4..d7e3e150a9a5 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops_types.h
+++ b/drivers/gpu/drm/xe/xe_ring_ops_types.h
@@ -8,7 +8,7 @@
struct xe_sched_job;
-#define MAX_JOB_SIZE_DW 48
+#define MAX_JOB_SIZE_DW 58
#define MAX_JOB_SIZE_BYTES (MAX_JOB_SIZE_DW * 4)
/**
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 86c705d18c0d..29e694bb1219 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -196,7 +196,7 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
*gt = (*hwe)->gt;
*xe = gt_to_xe(*gt);
break;
- };
+ }
}
/**
@@ -237,6 +237,7 @@ static void rtp_mark_active(struct xe_device *xe,
* the save-restore argument.
* @ctx: The context for processing the table, with one of device, gt or hwe
* @entries: Table with RTP definitions
+ * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries)
* @sr: Save-restore struct where matching rules execute the action. This can be
* viewed as the "coalesced view" of multiple the tables. The bits for each
* register set are expected not to collide with previously added entries
@@ -247,6 +248,7 @@ static void rtp_mark_active(struct xe_device *xe,
*/
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
+ size_t n_entries,
struct xe_reg_sr *sr)
{
const struct xe_rtp_entry_sr *entry;
@@ -256,10 +258,9 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
rtp_get_context(ctx, &hwe, &gt, &xe);
- if (IS_SRIOV_VF(xe))
- return;
+ xe_assert(xe, entries);
- for (entry = entries; entry && entry->name; entry++) {
+ for (entry = entries; entry - entries < n_entries; entry++) {
bool match = false;
if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
@@ -340,3 +341,8 @@ bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
return dss >= dss_per_gslice;
}
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return !IS_SRIOV_VF(gt_to_xe(gt));
+}
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 827d932b6908..4fe736a11c42 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -131,7 +131,7 @@ struct xe_reg_sr;
* @ver_end__: Last graphics IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -169,7 +169,7 @@ struct xe_reg_sr;
* @ver_end__: Last media IP version to match
*
* Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
- * inclusive on boths sides
+ * inclusive on both sides
*
* Refer to XE_RTP_RULES() for expected usage.
*/
@@ -430,7 +430,7 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry_sr *entries,
- struct xe_reg_sr *sr);
+ size_t n_entries, struct xe_reg_sr *sr);
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
const struct xe_rtp_entry *entries);
@@ -476,4 +476,15 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
+/*
+ * xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
+ *
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if device is not VF, false otherwise.
+ */
+bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
#endif
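With the explicit entry count, tables passed to xe_rtp_process_to_sr() no longer carry a terminating empty entry. A caller sketch under that assumption (hypothetical function; the table definition is elided, and callers typically pass ARRAY_SIZE(table)):

static void example_process_table(struct xe_hw_engine *hwe,
				  const struct xe_rtp_entry_sr *table,
				  size_t n_entries)
{
	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);

	/* Tables no longer need a terminating {} entry; pass the count instead. */
	xe_rtp_process_to_sr(&ctx, table, n_entries, &hwe->reg_sr);
}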
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index fe2cb2a96f78..1d43e183ca21 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -31,47 +31,56 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
sa_manager->bo = NULL;
}
-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+/**
+ * __xe_sa_bo_manager_init() - Create and initialize the suballocator
+ * @tile: the &xe_tile to allocate from
+ * @size: number of bytes to allocate
+ * @guard: number of bytes to exclude from suballocations
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ *
+ * Return: a pointer to the &xe_sa_manager or an ERR_PTR on failure.
+ */
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align)
{
struct xe_device *xe = tile_to_xe(tile);
- u32 managed_size = size - SZ_4K;
+ struct xe_sa_manager *sa_manager;
+ u32 managed_size;
struct xe_bo *bo;
int ret;
- struct xe_sa_manager *sa_manager = drmm_kzalloc(&tile_to_xe(tile)->drm,
- sizeof(*sa_manager),
- GFP_KERNEL);
+ xe_tile_assert(tile, size > guard);
+ managed_size = size - guard;
+
+ sa_manager = drmm_kzalloc(&xe->drm, sizeof(*sa_manager), GFP_KERNEL);
if (!sa_manager)
return ERR_PTR(-ENOMEM);
- sa_manager->bo = NULL;
-
bo = xe_managed_bo_create_pin_map(xe, tile, size,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
+ XE_BO_FLAG_GGTT_INVALIDATE |
+ XE_BO_FLAG_PINNED_NORESTORE);
if (IS_ERR(bo)) {
- drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
- PTR_ERR(bo));
- return (struct xe_sa_manager *)bo;
+ drm_err(&xe->drm, "Failed to prepare %uKiB BO for SA manager (%pe)\n",
+ size / SZ_1K, bo);
+ return ERR_CAST(bo);
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
-
- drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
- if (!sa_manager->cpu_ptr) {
- sa_manager->bo = NULL;
+ if (!sa_manager->cpu_ptr)
return ERR_PTR(-ENOMEM);
- }
} else {
sa_manager->cpu_ptr = bo->vmap.vaddr;
memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
}
+ drm_suballoc_manager_init(&sa_manager->base, managed_size, align);
ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
sa_manager);
if (ret)
@@ -80,8 +89,17 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
return sa_manager;
}
-struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
- unsigned int size)
+/**
+ * __xe_sa_bo_new() - Make a suballocation but use custom gfp flags.
+ * @sa_manager: the &xe_sa_manager
+ * @size: number of bytes we want to suballocate
+ * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL.
+ *
+ * Try to make a suballocation of size @size.
+ *
+ * Return: a &drm_suballoc, or an ERR_PTR.
+ */
+struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp)
{
/*
	 * BB too large, return -ENOBUFS indicating user should split
@@ -90,7 +108,7 @@ struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
if (size > sa_manager->base.size)
return ERR_PTR(-ENOBUFS);
- return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
+ return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
}
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 4e96483057d7..1170ee5a81a8 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -5,19 +5,37 @@
#ifndef _XE_SA_H_
#define _XE_SA_H_
+#include <linux/sizes.h>
+#include <linux/types.h>
#include "xe_sa_types.h"
struct dma_fence;
-struct xe_bo;
struct xe_tile;
-struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align);
+struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 guard, u32 align);
+struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size, gfp_t gfp);
+
+static inline struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32 align)
+{
+ return __xe_sa_bo_manager_init(tile, size, SZ_4K, align);
+}
+
+/**
+ * xe_sa_bo_new() - Make a suballocation.
+ * @sa_manager: the &xe_sa_manager
+ * @size: number of bytes we want to suballocate
+ *
+ * Try to make a suballocation of size @size.
+ *
+ * Return: a &drm_suballoc, or an ERR_PTR.
+ */
+static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size)
+{
+ return __xe_sa_bo_new(sa_manager, size, GFP_KERNEL);
+}
-struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
- u32 size);
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
-void xe_sa_bo_free(struct drm_suballoc *sa_bo,
- struct dma_fence *fence);
+void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence);
static inline struct xe_sa_manager *
to_xe_sa_manager(struct drm_suballoc_manager *mng)
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index eeccc1c318ae..1905ca590965 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -280,7 +280,7 @@ void xe_sched_job_arm(struct xe_sched_job *job)
fence = &chain->base;
}
- job->fence = fence;
+ job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */
drm_sched_job_arm(&job->drm);
}
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index 0d3f76fb05ce..dbf260dded8d 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -18,9 +18,9 @@ struct dma_fence_chain;
* struct xe_job_ptrs - Per hw engine instance data
*/
struct xe_job_ptrs {
- /** @lrc_fence: Pre-allocated uinitialized lrc fence.*/
+ /** @lrc_fence: Pre-allocated uninitialized lrc fence.*/
struct dma_fence *lrc_fence;
- /** @chain_fence: Pre-allocated ninitialized fence chain node. */
+ /** @chain_fence: Pre-allocated uninitialized fence chain node. */
struct dma_fence_chain *chain_fence;
/** @batch_addr: Batch buffer address. */
u64 batch_addr;
@@ -40,7 +40,6 @@ struct xe_sched_job {
* @fence: dma fence to indicate completion. 1 way relationship - job
* can safely reference fence, fence cannot safely reference job.
*/
-#define JOB_FLAG_SUBMIT DMA_FENCE_FLAG_USER_BITS
struct dma_fence *fence;
/** @user_fence: write back value when BB is complete */
struct {
@@ -57,13 +56,15 @@ struct xe_sched_job {
u32 migrate_flush_flags;
/** @ring_ops_flush_tlb: The ring ops need to flush TLB before payload. */
bool ring_ops_flush_tlb;
+ /** @ggtt: mapped in ggtt. */
+ bool ggtt;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};
struct xe_sched_job_snapshot {
u16 batch_addr_len;
- u64 batch_addr[];
+ u64 batch_addr[] __counted_by(batch_addr_len);
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
new file mode 100644
index 000000000000..86d47aaf0358
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include <linux/shrinker.h>
+
+#include <drm/ttm/ttm_backup.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "xe_bo.h"
+#include "xe_pm.h"
+#include "xe_shrinker.h"
+
+/**
+ * struct xe_shrinker - per-device shrinker
+ * @xe: Back pointer to the device.
+ * @lock: Lock protecting accounting.
+ * @shrinkable_pages: Number of pages that are currently shrinkable.
+ * @purgeable_pages: Number of pages that are currently purgeable.
+ * @shrink: Pointer to the mm shrinker.
+ * @pm_worker: Worker to wake up the device if required.
+ */
+struct xe_shrinker {
+ struct xe_device *xe;
+ rwlock_t lock;
+ long shrinkable_pages;
+ long purgeable_pages;
+ struct shrinker *shrink;
+ struct work_struct pm_worker;
+};
+
+static struct xe_shrinker *to_xe_shrinker(struct shrinker *shrink)
+{
+ return shrink->private_data;
+}
+
+/**
+ * xe_shrinker_mod_pages() - Modify shrinker page accounting
+ * @shrinker: Pointer to the struct xe_shrinker.
+ * @shrinkable: Shrinkable pages delta. May be negative.
+ * @purgeable: Purgeable page delta. May be negative.
+ *
+ * Modifies the shrinkable and purgeable pages accounting.
+ */
+void
+xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable)
+{
+ write_lock(&shrinker->lock);
+ shrinker->shrinkable_pages += shrinkable;
+ shrinker->purgeable_pages += purgeable;
+ write_unlock(&shrinker->lock);
+}
+
+static s64 xe_shrinker_walk(struct xe_device *xe,
+ struct ttm_operation_ctx *ctx,
+ const struct xe_bo_shrink_flags flags,
+ unsigned long to_scan, unsigned long *scanned)
+{
+ unsigned int mem_type;
+ s64 freed = 0, lret;
+
+ for (mem_type = XE_PL_SYSTEM; mem_type <= XE_PL_TT; ++mem_type) {
+ struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type);
+ struct ttm_bo_lru_cursor curs;
+ struct ttm_buffer_object *ttm_bo;
+
+ if (!man || !man->use_tt)
+ continue;
+
+ ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) {
+ if (!ttm_bo_shrink_suitable(ttm_bo, ctx))
+ continue;
+
+ lret = xe_bo_shrink(ctx, ttm_bo, flags, scanned);
+ if (lret < 0)
+ return lret;
+
+ freed += lret;
+ if (*scanned >= to_scan)
+ break;
+ }
+ }
+
+ return freed;
+}
+
+static unsigned long
+xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
+ unsigned long num_pages;
+ bool can_backup = !!(sc->gfp_mask & __GFP_FS);
+
+ num_pages = ttm_backup_bytes_avail() >> PAGE_SHIFT;
+ read_lock(&shrinker->lock);
+
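+ /*
+ * Shrinking (backing pages up) is only possible when the allocation
+ * context allows __GFP_FS; otherwise only purgeable pages are counted.
+ */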
+ if (can_backup)
+ num_pages = min_t(unsigned long, num_pages, shrinker->shrinkable_pages);
+ else
+ num_pages = 0;
+
+ num_pages += shrinker->purgeable_pages;
+ read_unlock(&shrinker->lock);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+/*
+ * Check if we need runtime pm, and if so try to grab a reference if
+ * already active. If grabbing a reference fails, queue a worker that
+ * does it for us outside of reclaim, but don't wait for it to complete.
+ * If bo shrinking needs an rpm reference and we don't have it (yet),
+ * that bo will be skipped anyway.
+ */
+static bool xe_shrinker_runtime_pm_get(struct xe_shrinker *shrinker, bool force,
+ unsigned long nr_to_scan, bool can_backup)
+{
+ struct xe_device *xe = shrinker->xe;
+
+ if (IS_DGFX(xe) || !xe_device_has_flat_ccs(xe) ||
+ !ttm_backup_bytes_avail())
+ return false;
+
+ if (!force) {
+ read_lock(&shrinker->lock);
+ force = (nr_to_scan > shrinker->purgeable_pages && can_backup);
+ read_unlock(&shrinker->lock);
+ if (!force)
+ return false;
+ }
+
+ if (!xe_pm_runtime_get_if_active(xe)) {
+ if (xe_rpm_reclaim_safe(xe) && !ttm_bo_shrink_avoid_wait()) {
+ xe_pm_runtime_get(xe);
+ return true;
+ }
+ queue_work(xe->unordered_wq, &shrinker->pm_worker);
+ return false;
+ }
+
+ return true;
+}
+
+static void xe_shrinker_runtime_pm_put(struct xe_shrinker *shrinker, bool runtime_pm)
+{
+ if (runtime_pm)
+ xe_pm_runtime_put(shrinker->xe);
+}
+
+static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct xe_shrinker *shrinker = to_xe_shrinker(shrink);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = ttm_bo_shrink_avoid_wait(),
+ };
+ unsigned long nr_to_scan, nr_scanned = 0, freed = 0;
+ struct xe_bo_shrink_flags shrink_flags = {
+ .purge = true,
+ /* Don't request writeback without __GFP_IO. */
+ .writeback = !ctx.no_wait_gpu && (sc->gfp_mask & __GFP_IO),
+ };
+ bool runtime_pm;
+ bool purgeable;
+ bool can_backup = !!(sc->gfp_mask & __GFP_FS);
+ s64 lret;
+
+ nr_to_scan = sc->nr_to_scan;
+
+ read_lock(&shrinker->lock);
+ purgeable = !!shrinker->purgeable_pages;
+ read_unlock(&shrinker->lock);
+
+ /* Might need runtime PM. Try to wake early if it looks like it. */
+ runtime_pm = xe_shrinker_runtime_pm_get(shrinker, false, nr_to_scan, can_backup);
+
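+ /* First pass: purge pages that can be freed without backing them up. */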
+ if (purgeable && nr_scanned < nr_to_scan) {
+ lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
+ nr_to_scan, &nr_scanned);
+ if (lret >= 0)
+ freed += lret;
+ }
+
+ sc->nr_scanned = nr_scanned;
+ if (nr_scanned >= nr_to_scan || !can_backup)
+ goto out;
+
+ /* If we didn't wake before, try to do it now if needed. */
+ if (!runtime_pm)
+ runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
+
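+ /* Second pass: shrink remaining pages by copying them to backup storage. */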
+ shrink_flags.purge = false;
+ lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
+ nr_to_scan, &nr_scanned);
+ if (lret >= 0)
+ freed += lret;
+
+ sc->nr_scanned = nr_scanned;
+out:
+ xe_shrinker_runtime_pm_put(shrinker, runtime_pm);
+ return nr_scanned ? freed : SHRINK_STOP;
+}
+
+/* Wake up the device for shrinking. */
+static void xe_shrinker_pm(struct work_struct *work)
+{
+ struct xe_shrinker *shrinker =
+ container_of(work, typeof(*shrinker), pm_worker);
+
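+ /*
+ * Taking and immediately dropping a runtime PM reference resumes the
+ * device outside of the reclaim context.
+ */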
+ xe_pm_runtime_get(shrinker->xe);
+ xe_pm_runtime_put(shrinker->xe);
+}
+
+/**
+ * xe_shrinker_create() - Create an xe per-device shrinker
+ * @xe: Pointer to the xe device.
+ *
+ * Returns: A pointer to the created shrinker on success, or an ERR_PTR-encoded
+ * negative error code on failure.
+ */
+struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
+{
+ struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
+
+ if (!shrinker)
+ return ERR_PTR(-ENOMEM);
+
+ shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
+ if (!shrinker->shrink) {
+ kfree(shrinker);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
+ shrinker->xe = xe;
+ rwlock_init(&shrinker->lock);
+ shrinker->shrink->count_objects = xe_shrinker_count;
+ shrinker->shrink->scan_objects = xe_shrinker_scan;
+ shrinker->shrink->private_data = shrinker;
+ shrinker_register(shrinker->shrink);
+
+ return shrinker;
+}
+
+/**
+ * xe_shrinker_destroy() - Destroy an xe per-device shrinker
+ * @shrinker: Pointer to the shrinker to destroy.
+ */
+void xe_shrinker_destroy(struct xe_shrinker *shrinker)
+{
+ xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
+ xe_assert(shrinker->xe, !shrinker->purgeable_pages);
+ shrinker_free(shrinker->shrink);
+ flush_work(&shrinker->pm_worker);
+ kfree(shrinker);
+}
diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h
new file mode 100644
index 000000000000..28a038f4fcbf
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_shrinker.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_SHRINKER_H_
+#define _XE_SHRINKER_H_
+
+struct xe_shrinker;
+struct xe_device;
+
+void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable);
+
+struct xe_shrinker *xe_shrinker_create(struct xe_device *xe);
+
+void xe_shrinker_destroy(struct xe_shrinker *shrinker);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index 5a1d65e4f19f..a0eab44c0e76 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
#include "regs/xe_regs.h"
@@ -12,6 +14,7 @@
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_vf.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -35,7 +38,7 @@ const char *xe_sriov_mode_to_string(enum xe_sriov_mode mode)
static bool test_is_vf(struct xe_device *xe)
{
- u32 value = xe_mmio_read32(xe_root_mmio_gt(xe), VF_CAP_REG);
+ u32 value = xe_mmio_read32(xe_root_tile_mmio(xe), VF_CAP_REG);
return value & VF_CAP;
}
@@ -78,7 +81,7 @@ void xe_sriov_probe_early(struct xe_device *xe)
xe->sriov.__mode = mode;
xe_assert(xe, xe->sriov.__mode);
- if (has_sriov)
+ if (IS_SRIOV(xe))
drm_info(&xe->drm, "Running in %s mode\n",
xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
}
@@ -112,6 +115,9 @@ int xe_sriov_init(struct xe_device *xe)
return err;
}
+ if (IS_SRIOV_VF(xe))
+ xe_sriov_vf_init_early(xe);
+
xe_assert(xe, !xe->sriov.wq);
xe->sriov.wq = alloc_workqueue("xe-sriov-wq", 0, 0);
if (!xe->sriov.wq)
@@ -119,6 +125,7 @@ int xe_sriov_init(struct xe_device *xe)
return drmm_add_action_or_reset(&xe->drm, fini_sriov, xe);
}
+ALLOW_ERROR_INJECTION(xe_sriov_init, ERRNO); /* See xe_pci_probe() */
/**
* xe_sriov_print_info - Print basic SR-IOV information.
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
index 7d156ba82479..dd1df950b021 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -20,7 +20,7 @@
* is within a range of supported VF numbers (up to maximum number of VFs that
* driver can support, including VF0 that represents the PF itself).
*
- * Note: Effective only on debug builds. See `Xe ASSERTs`_ for more information.
+ * Note: Effective only on debug builds. See `Xe Asserts`_ for more information.
*/
#define xe_sriov_pf_assert_vfid(xe, vfid) \
xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h
index c7b7ad4af5c8..ca94382a721e 100644
--- a/drivers/gpu/drm/xe/xe_sriov_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_types.h
@@ -9,6 +9,7 @@
#include <linux/build_bug.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include <linux/workqueue_types.h>
/**
* VFID - Virtual Function Identifier
@@ -56,4 +57,20 @@ struct xe_device_pf {
struct mutex master_lock;
};
+/**
+ * struct xe_device_vf - Xe Virtual Function related data
+ *
+ * The data in this structure is valid only if the driver is running in
+ * @XE_SRIOV_MODE_VF mode.
+ */
+struct xe_device_vf {
+ /** @migration: VF Migration state data */
+ struct {
+ /** @migration.worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
+ unsigned long gt_flags;
+ } migration;
+};
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
new file mode 100644
index 000000000000..c1275e64aa9c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt_sriov_printk.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_sriov_vf.h"
+
+/**
+ * DOC: VF restore procedure in PF KMD and VF KMD
+ *
+ * Restoring a previously saved state of a VF is one of the core features of
+ * SR-IOV. All major VM Management applications allow saving and restoring
+ * the VM state, and doing that to a VM which uses an SR-IOV VF as one of
+ * the accessible devices requires support from KMD on both the PF and VF side.
+ * VMM initiates all required operations through VFIO module, which then
+ * translates them into PF KMD calls. This description will focus on these
+ * calls, leaving out the module which initiates these steps (VFIO).
+ *
+ * In order to start the restore procedure, GuC needs to keep the VF in
+ * proper state. The PF driver can ensure GuC sets it to the VF_READY state
+ * by provisioning the VF, which in turn can be done after Function Level
+ * Reset of said VF (or after it was freshly created - in that case FLR
+ * is not needed). The FLR procedure ends with GuC sending message
+ * `GUC_PF_NOTIFY_VF_FLR_DONE`, and then provisioning data is sent to GuC.
+ * After the provisioning is completed, the VF needs to be paused, and
+ * at that point the actual restore can begin.
+ *
+ * During VF Restore, state of several resources is restored. These may
+ * include local memory content (system memory is restored by VMM itself),
+ * values of MMIO registers, stateless compression metadata and others.
+ * The final resource which also needs restoring is state of the VF
+ * submission maintained within GuC. For that, `GUC_PF_OPCODE_VF_RESTORE`
+ * message is used, with reference to the state blob to be consumed by
+ * GuC.
+ *
+ * Next, when VFIO is asked to set the VM into running state, the PF driver
+ * sends `GUC_PF_TRIGGER_VF_RESUME` to GuC. When sent after restore, this
+ * changes VF state within GuC to `VF_RESFIX_BLOCKED` rather than the
+ * usual `VF_RUNNING`. At this point GuC triggers an interrupt to inform
+ * the VF KMD within the VM that it was migrated.
+ *
+ * As soon as the Virtual GPU of the VM starts, the VF driver within it
+ * receives the MIGRATED interrupt and schedules the post-migration recovery
+ * worker.
+ * That worker queries GuC for new provisioning (using MMIO communication),
+ * and applies fixups to any non-virtualized resources used by the VF.
+ *
+ * When the VF driver is ready to continue operation on the newly connected
+ * hardware, it sends `VF2GUC_NOTIFY_RESFIX_DONE`, which causes GuC to move
+ * the VF to the long-awaited `VF_RUNNING` state and therefore to start
+ * handling CTB messages and scheduling workloads from the VF::
+ *
+ * PF GuC VF
+ * [ ] | |
+ * [ ] PF2GUC_VF_CONTROL(pause) | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_READY_PAUSED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] success [ ] |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] PF loads resources from the | |
+ * [ ]------- saved image supplied | |
+ * [ ] | | |
+ * [ ] <----- | |
+ * [ ] | |
+ * [ ] GUC_PF_OPCODE_VF_RESTORE | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC loads contexts and CTB |
+ * [ ] [ ]------- state from image |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_PAUSED |
+ * [ ] [ ] | |
+ * [ ] success [ ] <----- |
+ * [ ] <---------------------------[ ] |
+ * [ ] | |
+ * [ ] GUC_PF_TRIGGER_VF_RESUME | |
+ * [ ]---------------------------> [ ] |
+ * [ ] [ ] GuC sets new VF state to |
+ * [ ] [ ]------- VF_RESFIX_BLOCKED |
+ * [ ] [ ] | |
+ * [ ] [ ] <----- |
+ * [ ] [ ] |
+ * [ ] [ ] GUC_INTR_SW_INT_0 |
+ * [ ] success [ ]---------------------------> [ ]
+ * [ ] <---------------------------[ ] [ ]
+ * | | VF2GUC_QUERY_SINGLE_KLV [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] new VF provisioning [ ]
+ * | [ ]---------------------------> [ ]
+ * | | [ ]
+ * | | VF driver applies post [ ]
+ * | | migration fixups -------[ ]
+ * | | | [ ]
+ * | | -----> [ ]
+ * | | [ ]
+ * | | VF2GUC_NOTIFY_RESFIX_DONE [ ]
+ * | [ ] <---------------------------[ ]
+ * | [ ] [ ]
+ * | [ ] GuC sets new VF state to [ ]
+ * | [ ]------- VF_RUNNING [ ]
+ * | [ ] | [ ]
+ * | [ ] <----- [ ]
+ * | [ ] success [ ]
+ * | [ ]---------------------------> [ ]
+ * | | |
+ * | | |
+ */
+
+static void migration_worker_func(struct work_struct *w);
+
+/**
+ * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
+ * @xe: the &xe_device to initialize
+ */
+void xe_sriov_vf_init_early(struct xe_device *xe)
+{
+ INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+}
+
+/**
+ * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning.
+ * @xe: the &xe_device struct instance
+ *
+ * After migration, we need to re-query all VF configuration to make sure
+ * it matches the previous provisioning. Note that most of the VF provisioning
+ * should be the same, except the GGTT range, since GGTT is not virtualized per-VF.
+ *
+ * Returns: 0 if the operation completed successfully, or a negative error
+ * code otherwise.
+ */
+static int vf_post_migration_requery_guc(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err, ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_vf_query_config(gt);
+ ret = ret ?: err;
+ }
+
+ return ret;
+}
+
+/*
+ * vf_post_migration_imminent - Check if post-restore recovery is coming.
+ * @xe: the &xe_device struct instance
+ *
+ * Return: True if the migration recovery worker will soon be running. Any worker
+ * currently executing does not affect the result.
+ */
+static bool vf_post_migration_imminent(struct xe_device *xe)
+{
+ return xe->sriov.vf.migration.gt_flags != 0 ||
+ work_pending(&xe->sriov.vf.migration.worker);
+}
+
+/*
+ * Notify all GuCs about resource fixups apply finished.
+ */
+static void vf_post_migration_notify_resfix_done(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (vf_post_migration_imminent(xe))
+ goto skip;
+ xe_gt_sriov_vf_notify_resfix_done(gt);
+ }
+ return;
+
+skip:
+ drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n");
+}
+
+static void vf_post_migration_recovery(struct xe_device *xe)
+{
+ int err;
+
+ drm_dbg(&xe->drm, "migration recovery in progress\n");
+ xe_pm_runtime_get(xe);
+ err = vf_post_migration_requery_guc(xe);
+ if (vf_post_migration_imminent(xe))
+ goto defer;
+ if (unlikely(err))
+ goto fail;
+
+ /* FIXME: add the recovery steps */
+ vf_post_migration_notify_resfix_done(xe);
+ xe_pm_runtime_put(xe);
+ drm_notice(&xe->drm, "migration recovery ended\n");
+ return;
+defer:
+ xe_pm_runtime_put(xe);
+ drm_dbg(&xe->drm, "migration recovery deferred\n");
+ return;
+fail:
+ xe_pm_runtime_put(xe);
+ drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
+ xe_device_declare_wedged(xe);
+}
+
+static void migration_worker_func(struct work_struct *w)
+{
+ struct xe_device *xe = container_of(w, struct xe_device,
+ sriov.vf.migration.worker);
+
+ vf_post_migration_recovery(xe);
+}
+
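+/*
+ * Return true only once every GT has set its bit in migration.gt_flags,
+ * i.e. all GTs have signalled that they are ready for recovery.
+ */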
+static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ for_each_gt(gt, xe, id) {
+ if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) {
+ xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n");
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
+ * @xe: the &xe_device to start recovery on
+ *
+ * This function shall be called only by VF.
+ */
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
+{
+ bool started;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ if (!vf_ready_to_recovery_on_all_gts(xe))
+ return;
+
+ WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0);
+ /* Ensure other threads see that no flags are set now. */
+ smp_mb();
+
+ started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
+ drm_info(&xe->drm, "VF migration recovery %s\n", started ?
+ "scheduled" : "already in progress");
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
new file mode 100644
index 000000000000..7b8622cff2b7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023-2024 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_H_
+#define _XE_SRIOV_VF_H_
+
+struct xe_device;
+
+void xe_sriov_vf_init_early(struct xe_device *xe);
+void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
new file mode 100644
index 000000000000..1f710b3fc599
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_survivability_mode.h"
+#include "xe_survivability_mode_types.h"
+
+#include <linux/kobject.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+
+#include "xe_configfs.h"
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_heci_gsc.h"
+#include "xe_mmio.h"
+#include "xe_pcode_api.h"
+#include "xe_vsec.h"
+
+#define MAX_SCRATCH_MMIO 8
+
+/**
+ * DOC: Xe Boot Survivability
+ *
+ * Boot Survivability is a software-based workflow for recovering a system in a failed boot state.
+ * Here, system recoverability is concerned with recovering the firmware responsible for boot.
+ *
+ * This is implemented by loading the driver with the bare minimum (no drm card) to allow the firmware
+ * to be flashed through mei and to collect telemetry. The driver's probe flow is modified
+ * such that it enters survivability mode when pcode initialization is incomplete and boot status
+ * denotes a failure.
+ *
+ * Survivability mode can also be entered manually using the survivability mode attribute available
+ * through configfs, which is beneficial in several use cases. It can be used to address scenarios
+ * where pcode does not detect failure or for validation purposes. It can also be used in
+ * In-Field-Repair (IFR) to repair a single card without impacting the other cards in a node.
+ *
+ * Use the command below to enable survivability mode manually::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
+ *
+ * Refer to :ref:`xe_configfs` for more details on how to use configfs.
+ *
+ * Survivability mode is indicated by the admin-only readable sysfs attribute below, which
+ * provides additional debug information::
+ *
+ *    /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * Capability Information:
+ * Provides boot status
+ * Postcode Information:
+ * Provides information about the failure
+ * Overflow Information:
+ * Provides history of previous failures
+ * Auxiliary Information:
+ * Certain failures may have information in addition to postcode information
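+ *
+ * As an illustrative example (the PCI address is just a placeholder for
+ * whichever device entered survivability mode), the collected information
+ * can be read back with::
+ *
+ *   # cat /sys/bus/pci/devices/0000:03:00.0/survivability_mode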
+ */
+
+static u32 aux_history_offset(u32 reg_value)
+{
+ return REG_FIELD_GET(AUXINFO_HISTORY_OFFSET, reg_value);
+}
+
+static void set_survivability_info(struct xe_mmio *mmio, struct xe_survivability_info *info,
+ int id, char *name)
+{
+ strscpy(info[id].name, name, sizeof(info[id].name));
+ info[id].reg = PCODE_SCRATCH(id).raw;
+ info[id].value = xe_mmio_read32(mmio, PCODE_SCRATCH(id));
+}
+
+static void populate_survivability_info(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info = survivability->info;
+ struct xe_mmio *mmio;
+ u32 id = 0, reg_value;
+ char name[NAME_MAX];
+ int index;
+
+ mmio = xe_root_tile_mmio(xe);
+ set_survivability_info(mmio, info, id, "Capability Info");
+ reg_value = info[id].value;
+
+ if (reg_value & HISTORY_TRACKING) {
+ id++;
+ set_survivability_info(mmio, info, id, "Postcode Info");
+
+ if (reg_value & OVERFLOW_SUPPORT) {
+ id = REG_FIELD_GET(OVERFLOW_REG_OFFSET, reg_value);
+ set_survivability_info(mmio, info, id, "Overflow Info");
+ }
+ }
+
+ if (reg_value & AUXINFO_SUPPORT) {
+ id = REG_FIELD_GET(AUXINFO_REG_OFFSET, reg_value);
+
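+ /*
+ * Each auxiliary record encodes the scratch offset of the next one in
+ * its history field; walk the chain until it terminates.
+ */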
+ for (index = 0; id && reg_value; index++, reg_value = info[id].value,
+ id = aux_history_offset(reg_value)) {
+ snprintf(name, NAME_MAX, "Auxiliary Info %d", index);
+ set_survivability_info(mmio, info, id, name);
+ }
+ }
+}
+
+static void log_survivability_info(struct pci_dev *pdev)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info = survivability->info;
+ int id;
+
+ dev_info(&pdev->dev, "Survivability Boot Status : Critical Failure (%d)\n",
+ survivability->boot_status);
+ for (id = 0; id < MAX_SCRATCH_MMIO; id++) {
+ if (info[id].reg)
+ dev_info(&pdev->dev, "%s: 0x%x - 0x%x\n", info[id].name,
+ info[id].reg, info[id].value);
+ }
+}
+
+static ssize_t survivability_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buff)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info = survivability->info;
+ int index = 0, count = 0;
+
+ for (index = 0; index < MAX_SCRATCH_MMIO; index++) {
+ if (info[index].reg)
+ count += sysfs_emit_at(buff, count, "%s: 0x%x - 0x%x\n", info[index].name,
+ info[index].reg, info[index].value);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_ADMIN_RO(survivability_mode);
+
+static void xe_survivability_mode_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct device *dev = &pdev->dev;
+
+ xe_configfs_clear_survivability_mode(pdev);
+ sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
+}
+
+static int enable_survivability_mode(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ int ret = 0;
+
+ /* create survivability mode sysfs */
+ ret = sysfs_create_file(&dev->kobj, &dev_attr_survivability_mode.attr);
+ if (ret) {
+ dev_warn(dev, "Failed to create survivability sysfs files\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(xe->drm.dev,
+ xe_survivability_mode_fini, xe);
+ if (ret)
+ return ret;
+
+ /* Make sure xe_heci_gsc_init() knows about survivability mode */
+ survivability->mode = true;
+
+ ret = xe_heci_gsc_init(xe);
+ if (ret) {
+ /*
+ * But if it fails, the device can't enter survivability
+ * mode, so move it back for correct error handling.
+ */
+ survivability->mode = false;
+ return ret;
+ }
+
+ xe_vsec_init(xe);
+
+ dev_err(dev, "In Survivability Mode\n");
+
+ return 0;
+}
+
+/**
+ * xe_survivability_mode_is_enabled - check if survivability mode is enabled
+ * @xe: xe device instance
+ *
+ * Returns true if in survivability mode, false otherwise
+ */
+bool xe_survivability_mode_is_enabled(struct xe_device *xe)
+{
+ return xe->survivability.mode;
+}
+
+/**
+ * xe_survivability_mode_is_requested - check if it's possible to enable survivability
+ * mode that was requested by firmware or userspace
+ * @xe: xe device instance
+ *
+ * This function reads configfs and boot status from Pcode.
+ *
+ * Return: true if platform support is available and boot status indicates
+ * failure or if survivability mode is requested, false otherwise.
+ */
+bool xe_survivability_mode_is_requested(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ u32 data;
+ bool survivability_mode;
+
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ return false;
+
+ survivability_mode = xe_configfs_get_survivability_mode(pdev);
+
+ if (xe->info.platform < XE_BATTLEMAGE) {
+ if (survivability_mode) {
+ dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
+ xe_configfs_clear_survivability_mode(pdev);
+ }
+ return false;
+ }
+
+ /* Enable survivability mode if set via configfs */
+ if (survivability_mode)
+ return true;
+
+ data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
+ survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
+
+ return survivability->boot_status == NON_CRITICAL_FAILURE ||
+ survivability->boot_status == CRITICAL_FAILURE;
+}
+
+/**
+ * xe_survivability_mode_enable - Initialize and enable the survivability mode
+ * @xe: xe device instance
+ *
+ * Initialize survivability information and enable survivability mode
+ *
+ * Return: 0 if survivability mode is enabled or not requested; negative error
+ * code otherwise.
+ */
+int xe_survivability_mode_enable(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ if (!xe_survivability_mode_is_requested(xe))
+ return 0;
+
+ survivability->size = MAX_SCRATCH_MMIO;
+
+ info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ survivability->info = info;
+
+ populate_survivability_info(xe);
+
+ /* Only log debug information and exit if it is a critical failure */
+ if (survivability->boot_status == CRITICAL_FAILURE) {
+ log_survivability_info(pdev);
+ return -ENXIO;
+ }
+
+ return enable_survivability_mode(pdev);
+}
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
new file mode 100644
index 000000000000..02231c2bf008
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SURVIVABILITY_MODE_H_
+#define _XE_SURVIVABILITY_MODE_H_
+
+#include <linux/types.h>
+
+struct xe_device;
+
+int xe_survivability_mode_enable(struct xe_device *xe);
+bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+bool xe_survivability_mode_is_requested(struct xe_device *xe);
+
+#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode_types.h b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
new file mode 100644
index 000000000000..19d433e253df
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SURVIVABILITY_MODE_TYPES_H_
+#define _XE_SURVIVABILITY_MODE_TYPES_H_
+
+#include <linux/limits.h>
+#include <linux/types.h>
+
+struct xe_survivability_info {
+ char name[NAME_MAX];
+ u32 reg;
+ u32 value;
+};
+
+/**
+ * struct xe_survivability: Contains survivability mode information
+ */
+struct xe_survivability {
+ /** @info: struct that holds survivability info from scratch registers */
+ struct xe_survivability_info *info;
+
+ /** @size: number of scratch registers */
+ u32 size;
+
+ /** @boot_status: indicates critical/non critical boot failure */
+ u8 boot_status;
+
+ /** @mode: boolean to indicate survivability mode */
+ bool mode;
+};
+
+#endif /* _XE_SURVIVABILITY_MODE_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
new file mode 100644
index 000000000000..f0b167b3fb6a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -0,0 +1,1043 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "xe_bo.h"
+#include "xe_gt_stats.h"
+#include "xe_gt_tlb_invalidation.h"
+#include "xe_migrate.h"
+#include "xe_module.h"
+#include "xe_pt.h"
+#include "xe_svm.h"
+#include "xe_ttm_vram_mgr.h"
+#include "xe_vm.h"
+#include "xe_vm_types.h"
+
+static bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+ /*
+ * Advisory-only check of whether the range is currently backed by VRAM
+ * memory.
+ */
+
+ struct drm_gpusvm_range_flags flags = {
+ /* Pairs with WRITE_ONCE in drm_gpusvm.c */
+ .__flags = READ_ONCE(range->base.flags.__flags),
+ };
+
+ return flags.has_devmem_pages;
+}
+
+static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
+{
+ /* Not reliable without notifier lock */
+ return xe_svm_range_in_vram(range) && range->tile_present;
+}
+
+static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
+{
+ return container_of(gpusvm, struct xe_vm, svm.gpusvm);
+}
+
+static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
+{
+ return gpusvm_to_vm(r->gpusvm);
+}
+
+static unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_start(&range->base);
+}
+
+static unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_end(&range->base);
+}
+
+static unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_size(&range->base);
+}
+
+#define range_debug(r__, operation__) \
+ vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
+ "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
+ "start=0x%014lx, end=0x%014lx, size=%lu", \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
+ (r__)->base.gpusvm, \
+ xe_svm_range_in_vram((r__)) ? 1 : 0, \
+ xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
+ (r__)->base.notifier_seq, \
+ xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
+ xe_svm_range_size((r__)))
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+ range_debug(range, operation);
+}
+
+static void *xe_svm_devm_owner(struct xe_device *xe)
+{
+ return xe;
+}
+
+static struct drm_gpusvm_range *
+xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
+{
+ struct xe_svm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return NULL;
+
+ INIT_LIST_HEAD(&range->garbage_collector_link);
+ xe_vm_get(gpusvm_to_vm(gpusvm));
+
+ return &range->base;
+}
+
+static void xe_svm_range_free(struct drm_gpusvm_range *range)
+{
+ xe_vm_put(range_to_vm(range));
+ kfree(range);
+}
+
+static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return container_of(r, struct xe_svm_range, base);
+}
+
+static void
+xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct xe_device *xe = vm->xe;
+
+ range_debug(range, "GARBAGE COLLECTOR ADD");
+
+ drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ if (list_empty(&range->garbage_collector_link))
+ list_add_tail(&range->garbage_collector_link,
+ &vm->svm.garbage_collector.range_list);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
+ &vm->svm.garbage_collector.work);
+}
+
+static u8
+xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
+ const struct mmu_notifier_range *mmu_range,
+ u64 *adj_start, u64 *adj_end)
+{
+ struct xe_svm_range *range = to_xe_range(r);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ xe_svm_assert_in_notifier(vm);
+
+ range_debug(range, "NOTIFIER");
+
+ /* Skip if already unmapped or if no binding exists */
+ if (range->base.flags.unmapped || !range->tile_present)
+ return 0;
+
+ range_debug(range, "NOTIFIER - EXECUTE");
+
+ /* Adjust invalidation to range boundaries */
+ *adj_start = min(xe_svm_range_start(range), mmu_range->start);
+ *adj_end = max(xe_svm_range_end(range), mmu_range->end);
+
+ /*
+ * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
+ * invalidation code can't correctly cope with sparse ranges or
+ * invalidations spanning multiple ranges.
+ */
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ range->tile_invalidated |= BIT(id);
+ }
+
+ return tile_mask;
+}
+
+static void
+xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
+
+ xe_svm_assert_in_notifier(vm);
+
+ drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
+ if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
+ xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
+ mmu_range);
+}
+
+static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_notifier *notifier,
+ const struct mmu_notifier_range *mmu_range)
+{
+ struct xe_vm *vm = gpusvm_to_vm(gpusvm);
+ struct xe_device *xe = vm->xe;
+ struct xe_tile *tile;
+ struct drm_gpusvm_range *r, *first;
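+ /* One TLB invalidation fence per potential GT (primary and media on every tile) */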
+ struct xe_gt_tlb_invalidation_fence
+ fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
+ u8 tile_mask = 0;
+ u8 id;
+ u32 fence_id = 0;
+ long err;
+
+ xe_svm_assert_in_notifier(vm);
+
+ vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
+ "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
+ vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
+ mmu_range->start, mmu_range->end, mmu_range->event);
+
+ /* Adjust invalidation to notifier boundaries */
+ adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
+ adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);
+
+ first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
+ if (!first)
+ return;
+
+ /*
+ * PTs may be getting destroyed, so it is not safe to touch them, but the PTs
+ * should be invalidated at this point in time. Regardless, we still need to
+ * ensure any DMA mappings are unmapped here.
+ */
+ if (xe_vm_is_closed(vm))
+ goto range_notifier_event_end;
+
+ /*
+ * XXX: Less than ideal to always wait on VM's resv slots if an
+ * invalidation is not required. Could walk range list twice to figure
+ * out if an invalidation is needed, but that is also not ideal.
+ */
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+ r = first;
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
+ &adj_start,
+ &adj_end);
+ if (!tile_mask)
+ goto range_notifier_event_end;
+
+ xe_device_wmb(xe);
+
+ for_each_tile(tile, xe, id) {
+ if (tile_mask & BIT(id)) {
+ int err;
+
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ adj_start,
+ adj_end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ adj_start,
+ adj_end,
+ vm->usm.asid);
+ if (WARN_ON_ONCE(err < 0))
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+range_notifier_event_end:
+ r = first;
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
+ xe_svm_range_notifier_event_end(vm, r, mmu_range);
+}
+
+static int __xe_svm_garbage_collector(struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct dma_fence *fence;
+
+ range_debug(range, "GARBAGE COLLECTOR");
+
+ xe_vm_lock(vm, false);
+ fence = xe_vm_range_unbind(vm, range);
+ xe_vm_unlock(vm);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ dma_fence_put(fence);
+
+ drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
+
+ return 0;
+}
+
+static int xe_svm_garbage_collector(struct xe_vm *vm)
+{
+ struct xe_svm_range *range;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (xe_vm_is_closed_or_banned(vm))
+ return -ENOENT;
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ for (;;) {
+ range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
+ typeof(*range),
+ garbage_collector_link);
+ if (!range)
+ break;
+
+ list_del(&range->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ err = __xe_svm_garbage_collector(vm, range);
+ if (err) {
+ drm_warn(&vm->xe->drm,
+ "Garbage collection failed: %pe\n",
+ ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+
+ spin_lock(&vm->svm.garbage_collector.lock);
+ }
+ spin_unlock(&vm->svm.garbage_collector.lock);
+
+ return 0;
+}
+
+static void xe_svm_garbage_collector_work_func(struct work_struct *w)
+{
+ struct xe_vm *vm = container_of(w, struct xe_vm,
+ svm.garbage_collector.work);
+
+ down_write(&vm->lock);
+ xe_svm_garbage_collector(vm);
+ up_write(&vm->lock);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
+static struct xe_vram_region *page_to_vr(struct page *page)
+{
+ return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
+}
+
+static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
+{
+ return container_of(vr, struct xe_tile, mem.vram);
+}
+
+static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
+ struct page *page)
+{
+ u64 dpa;
+ struct xe_tile *tile = vr_to_tile(vr);
+ u64 pfn = page_to_pfn(page);
+ u64 offset;
+
+ xe_tile_assert(tile, is_device_private_page(page));
+ xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+
+ offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
+ dpa = vr->dpa_base + offset;
+
+ return dpa;
+}
+
+enum xe_svm_copy_dir {
+ XE_SVM_COPY_TO_VRAM,
+ XE_SVM_COPY_TO_SRAM,
+};
+
+static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages, const enum xe_svm_copy_dir dir)
+{
+ struct xe_vram_region *vr = NULL;
+ struct xe_tile *tile;
+ struct dma_fence *fence = NULL;
+ unsigned long i;
+#define XE_VRAM_ADDR_INVALID ~0x0ull
+ u64 vram_addr = XE_VRAM_ADDR_INVALID;
+ int err = 0, pos = 0;
+ bool sram = dir == XE_SVM_COPY_TO_SRAM;
+
+ /*
+ * This flow is complex: it locates physically contiguous device pages,
+ * derives the starting physical address, and performs a single GPU copy
+ * to for every 8M chunk in a DMA address array. Both device pages and
+ * DMA addresses may be sparsely populated. If either is NULL, a copy is
+ * triggered based on the current search state. The last GPU copy is
+ * waited on to ensure all copies are complete.
+ */
+
+ for (i = 0; i < npages; ++i) {
+ struct page *spage = pages[i];
+ struct dma_fence *__fence;
+ u64 __vram_addr;
+ bool match = false, chunk, last;
+
+#define XE_MIGRATE_CHUNK_SIZE SZ_8M
+ chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+ last = (i + 1) == npages;
+
+ /* No CPU page and no device pages queued to copy */
+ if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
+ continue;
+
+ if (!vr && spage) {
+ vr = page_to_vr(spage);
+ tile = vr_to_tile(vr);
+ }
+ XE_WARN_ON(spage && page_to_vr(spage) != vr);
+
+ /*
+ * CPU page and device page valid, capture physical address on
+ * first device page, check if physical contiguous on subsequent
+ * device pages.
+ */
+ if (dma_addr[i] && spage) {
+ __vram_addr = xe_vram_region_page_to_dpa(vr, spage);
+ if (vram_addr == XE_VRAM_ADDR_INVALID) {
+ vram_addr = __vram_addr;
+ pos = i;
+ }
+
+ match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+ }
+
+ /*
+ * Mismatched physical address, 8M copy chunk, or last page -
+ * trigger a copy.
+ */
+ if (!match || chunk || last) {
+ /*
+ * Extra page for first copy if last page and matching
+ * physical address.
+ */
+ int incr = (match && last) ? 1 : 0;
+
+ if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ if (sram) {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ vram_addr, (u64)dma_addr[pos], i - pos + incr);
+ __fence = xe_migrate_from_vram(tile->migrate,
+ i - pos + incr,
+ vram_addr,
+ dma_addr + pos);
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
+ (u64)dma_addr[pos], vram_addr, i - pos + incr);
+ __fence = xe_migrate_to_vram(tile->migrate,
+ i - pos + incr,
+ dma_addr + pos,
+ vram_addr);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+
+ /* Setup physical address of next device page */
+ if (dma_addr[i] && spage) {
+ vram_addr = __vram_addr;
+ pos = i;
+ } else {
+ vram_addr = XE_VRAM_ADDR_INVALID;
+ }
+
+ /* Extra mismatched device page, copy it */
+ if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ if (sram) {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ vram_addr, (u64)dma_addr[pos], 1);
+ __fence = xe_migrate_from_vram(tile->migrate, 1,
+ vram_addr,
+ dma_addr + pos);
+ } else {
+ vm_dbg(&tile->xe->drm,
+ "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
+ (u64)dma_addr[pos], vram_addr, 1);
+ __fence = xe_migrate_to_vram(tile->migrate, 1,
+ dma_addr + pos,
+ vram_addr);
+ }
+ if (IS_ERR(__fence)) {
+ err = PTR_ERR(__fence);
+ goto err_out;
+ }
+
+ dma_fence_put(fence);
+ fence = __fence;
+ }
+ }
+ }
+
+err_out:
+ /* Wait for all copies to complete */
+ if (fence) {
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ }
+
+ return err;
+#undef XE_MIGRATE_CHUNK_SIZE
+#undef XE_VRAM_ADDR_INVALID
+}
+
+static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages)
+{
+ return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
+}
+
+static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
+ unsigned long npages)
+{
+ return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
+}
+
+static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
+}
+
+static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
+{
+ struct xe_bo *bo = to_xe_bo(devmem_allocation);
+
+ xe_bo_put_async(bo);
+}
+
+static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
+{
+ return PHYS_PFN(offset + vr->hpa_base);
+}
+
+static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
+{
+ return &tile->mem.vram.ttm.mm;
+}
+
+static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn)
+{
+ struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct ttm_resource *res = bo->ttm.resource;
+ struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
+ struct drm_buddy_block *block;
+ int j = 0;
+
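+ /*
+ * Walk the buddy blocks backing the BO and expand each block into its
+ * individual device page PFNs.
+ */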
+ list_for_each_entry(block, blocks, link) {
+ struct xe_vram_region *vr = block->private;
+ struct xe_tile *tile = vr_to_tile(vr);
+ struct drm_buddy *buddy = tile_to_buddy(tile);
+ u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
+ int i;
+
+ for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
+ pfn[j++] = block_pfn + i;
+ }
+
+ return 0;
+}
+
+static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = {
+ .devmem_release = xe_svm_devmem_release,
+ .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
+ .copy_to_devmem = xe_svm_copy_to_devmem,
+ .copy_to_ram = xe_svm_copy_to_ram,
+};
+
+#endif
+
+static const struct drm_gpusvm_ops gpusvm_ops = {
+ .range_alloc = xe_svm_range_alloc,
+ .range_free = xe_svm_range_free,
+ .invalidate = xe_svm_invalidate,
+};
+
+static const unsigned long fault_chunk_sizes[] = {
+ SZ_2M,
+ SZ_64K,
+ SZ_4K,
+};
+
+/**
+ * xe_svm_init() - SVM initialize
+ * @vm: The VM.
+ *
+ * Initialize SVM state which is embedded within the VM.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_init(struct xe_vm *vm)
+{
+ int err;
+
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, xe_svm_devm_owner(vm->xe), 0,
+ vm->size, xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ if (err)
+ return err;
+
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+
+ return 0;
+}
+
+/**
+ * xe_svm_close() - SVM close
+ * @vm: The VM.
+ *
+ * Close SVM state (i.e., stop and flush all SVM actions).
+ */
+void xe_svm_close(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+ flush_work(&vm->svm.garbage_collector.work);
+}
+
+/**
+ * xe_svm_fini() - SVM finalize
+ * @vm: The VM.
+ *
+ * Finalize SVM state which is embedded within the VM.
+ */
+void xe_svm_fini(struct xe_vm *vm)
+{
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+
+ drm_gpusvm_fini(&vm->svm.gpusvm);
+}
+
+static bool xe_svm_range_is_valid(struct xe_svm_range *range,
+ struct xe_tile *tile,
+ bool devmem_only)
+{
+ /*
+ * Advisory-only check of whether the range currently has a valid mapping;
+ * READ_ONCE() pairs with WRITE_ONCE() in xe_pt.c.
+ */
+ return ((READ_ONCE(range->tile_present) &
+ ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
+ (!devmem_only || xe_svm_range_in_vram(range));
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
+{
+ return &tile->mem.vram;
+}
+
+static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ struct mm_struct *mm = vm->svm.gpusvm.mm;
+ struct xe_vram_region *vr = tile_to_vr(tile);
+ struct drm_buddy_block *block;
+ struct list_head *blocks;
+ struct xe_bo *bo;
+ ktime_t end = 0;
+ int err;
+
+ range_debug(range, "ALLOCATE VRAM");
+
+ if (!mmget_not_zero(mm))
+ return -EFAULT;
+ mmap_read_lock(mm);
+
+retry:
+ bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
+ xe_svm_range_size(range),
+ ttm_bo_type_device,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_CPU_ADDR_MIRROR);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry;
+ goto unlock;
+ }
+
+ drm_gpusvm_devmem_init(&bo->devmem_allocation,
+ vm->xe->drm.dev, mm,
+ &gpusvm_devmem_ops,
+ &tile->mem.vram.dpagemap,
+ xe_svm_range_size(range));
+
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+ block->private = vr;
+
+ xe_bo_get(bo);
+ err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base,
+ &bo->devmem_allocation, ctx);
+ if (err)
+ xe_svm_devmem_release(&bo->devmem_allocation);
+
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+
+unlock:
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return err;
+}
+#else
+static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
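+/* Platforms that need 64K VRAM pages do not support migrating sub-64K (4K-sized) ranges. */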
+static bool supports_4K_migration(struct xe_device *xe)
+{
+ if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ return false;
+
+ return true;
+}
+
+static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
+ struct xe_vma *vma)
+{
+ struct xe_vm *vm = range_to_vm(&range->base);
+ u64 range_size = xe_svm_range_size(range);
+
+ if (!range->base.flags.migrate_devmem)
+ return false;
+
+ if (xe_svm_range_in_vram(range)) {
+ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+ return false;
+ }
+
+ if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * xe_svm_handle_pagefault() - SVM handle page fault
+ * @vm: The VM.
+ * @vma: The CPU address mirror VMA.
+ * @gt: The gt upon the fault occurred.
+ * @fault_addr: The GPU fault address.
+ * @atomic: The fault atomic access bit.
+ *
+ * Create GPU bindings for a SVM page fault. Optionally migrate to device
+ * memory.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool atomic)
+{
+ struct drm_gpusvm_ctx ctx = {
+ .read_only = xe_vma_read_only(vma),
+ .devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ .check_pages_threshold = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0,
+ .devmem_only = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
+ .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0,
+ };
+ struct xe_svm_range *range;
+ struct drm_gpusvm_range *r;
+ struct drm_exec exec;
+ struct dma_fence *fence;
+ int migrate_try_count = ctx.devmem_only ? 3 : 1;
+ struct xe_tile *tile = gt_to_tile(gt);
+ ktime_t end = 0;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
+
+retry:
+ /* Always process UNMAPs first so the view of SVM ranges is current */
+ err = xe_svm_garbage_collector(vm);
+ if (err)
+ return err;
+
+ r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
+ xe_vma_start(vma), xe_vma_end(vma),
+ &ctx);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ if (ctx.devmem_only && !r->flags.migrate_devmem)
+ return -EACCES;
+
+ range = to_xe_range(r);
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
+ return 0;
+
+ range_debug(range, "PAGE FAULT");
+
+ if (--migrate_try_count >= 0 &&
+ xe_svm_range_needs_migrate_to_vram(range, vma)) {
+ err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ if (err) {
+ if (migrate_try_count || !ctx.devmem_only) {
+ drm_dbg(&vm->xe->drm,
+ "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
+ vm->usm.asid, ERR_PTR(err));
+ return err;
+ }
+ }
+ }
+
+ range_debug(range, "GET PAGES");
+ err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
+ /* Corner case where CPU mappings have changed */
+ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ if (migrate_try_count > 0 || !ctx.devmem_only) {
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm,
+ &range->base);
+ }
+ drm_dbg(&vm->xe->drm,
+ "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ range_debug(range, "PAGE FAULT - RETRY PAGES");
+ goto retry;
+ } else {
+ drm_err(&vm->xe->drm,
+ "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ }
+ }
+ if (err) {
+ range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
+ goto err_out;
+ }
+
+ range_debug(range, "PAGE FAULT - BIND");
+
+retry_bind:
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj);
+ drm_exec_retry_on_contention(&exec);
+ if (err) {
+ drm_exec_fini(&exec);
+ goto err_out;
+ }
+
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ drm_exec_fini(&exec);
+ err = PTR_ERR(fence);
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
+ }
+ if (xe_vm_validate_should_retry(&exec, err, &end))
+ goto retry_bind;
+ goto err_out;
+ }
+ }
+ drm_exec_fini(&exec);
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+err_out:
+
+ return err;
+}
+
+/**
+ * xe_svm_has_mapping() - SVM has mappings
+ * @vm: The VM.
+ * @start: Start address.
+ * @end: End address.
+ *
+ * Check if an address range has SVM mappings.
+ *
+ * Return: True if address range has a SVM mapping, False otherwise
+ */
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+ return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
+}
+
+/**
+ * xe_svm_bo_evict() - SVM evict BO to system memory
+ * @bo: BO to evict
+ *
+ * SVM evict BO to system memory. GPU SVM layer ensures all device pages
+ * are evicted before returning.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+ return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+
+static struct drm_pagemap_device_addr
+xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir)
+{
+ struct device *pgmap_dev = dpagemap->dev;
+ enum drm_interconnect_protocol prot;
+ dma_addr_t addr;
+
+ if (pgmap_dev == dev) {
+ addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
+ prot = XE_INTERCONNECT_VRAM;
+ } else {
+ addr = DMA_MAPPING_ERROR;
+ prot = 0;
+ }
+
+ return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+}
+
+static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
+ .device_map = xe_drm_pagemap_device_map,
+};
+
+/**
+ * xe_devm_add() - Remap and provide memmap backing for device memory
+ * @tile: tile that the memory region belongs to
+ * @vr: vram memory region to remap
+ *
+ * This remaps device memory to the host physical address space and creates
+ * struct pages to back the device memory.
+ *
+ * Return: 0 on success, standard error code otherwise
+ */
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
+ struct resource *res;
+ void *addr;
+ int ret;
+
+ res = devm_request_free_mem_region(dev, &iomem_resource,
+ vr->usable_size);
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ return ret;
+ }
+
+ vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ vr->pagemap.range.start = res->start;
+ vr->pagemap.range.end = res->end;
+ vr->pagemap.nr_range = 1;
+ vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
+ vr->pagemap.owner = xe_svm_devm_owner(xe);
+ addr = devm_memremap_pages(dev, &vr->pagemap);
+
+ vr->dpagemap.dev = dev;
+ vr->dpagemap.ops = &xe_drm_pagemap_ops;
+
+ if (IS_ERR(addr)) {
+ devm_release_mem_region(dev, res->start, resource_size(res));
+ ret = PTR_ERR(addr);
+ drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
+ tile->id, ERR_PTR(ret));
+ return ret;
+ }
+ vr->hpa_base = res->start;
+
+ drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
+ tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
+ return 0;
+}
+#else
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ return 0;
+}
+#endif
+
+/**
+ * xe_svm_flush() - SVM flush
+ * @vm: The VM.
+ *
+ * Flush all SVM actions.
+ */
+void xe_svm_flush(struct xe_vm *vm)
+{
+ if (xe_vm_in_fault_mode(vm))
+ flush_work(&vm->svm.garbage_collector.work);
+}
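The fault handler above garbage-collects stale ranges, optionally migrates the range to VRAM, collects pages and then binds, retrying on transient failures. A minimal sketch of how a GT page-fault path might drive it, assuming the VM/VMA lookup has already happened; handle_svm_fault() is a hypothetical wrapper, not part of this patch:

	/* Hypothetical caller sketch; the locking mirrors the asserts in
	 * xe_svm_handle_pagefault() above (vm->lock held for write).
	 */
	static int handle_svm_fault(struct xe_gt *gt, struct xe_vm *vm,
				    struct xe_vma *vma, u64 fault_addr,
				    bool atomic)
	{
		int err;

		down_write(&vm->lock);
		err = xe_svm_handle_pagefault(vm, vma, gt, fault_addr, atomic);
		up_write(&vm->lock);

		return err;
	}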
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
new file mode 100644
index 000000000000..30fc78b85b30
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_SVM_H_
+#define _XE_SVM_H_
+
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+
+#include <drm/drm_pagemap.h>
+#include <drm/drm_gpusvm.h>
+
+#define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
+
+struct xe_bo;
+struct xe_gt;
+struct xe_tile;
+struct xe_vm;
+struct xe_vma;
+struct xe_vram_region;
+
+/** struct xe_svm_range - SVM range */
+struct xe_svm_range {
+ /** @base: base drm_gpusvm_range */
+ struct drm_gpusvm_range base;
+ /**
+ * @garbage_collector_link: Link into VM's garbage collect SVM range
+ * list. Protected by VM's garbage collect lock.
+ */
+ struct list_head garbage_collector_link;
+ /**
+ * @tile_present: Tile mask of tiles for which a binding is present for this range.
+ * Protected by GPU SVM notifier lock.
+ */
+ u8 tile_present;
+ /**
+ * @tile_invalidated: Tile mask of tiles for which the binding has been
+ * invalidated for this range. Protected by GPU SVM notifier lock.
+ */
+ u8 tile_invalidated;
+};
+
+/**
+ * xe_svm_range_pages_valid() - SVM range pages valid
+ * @range: SVM range
+ *
+ * Return: True if SVM range pages are valid, False otherwise
+ */
+static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
+}
+
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr);
+
+int xe_svm_init(struct xe_vm *vm);
+
+void xe_svm_fini(struct xe_vm *vm);
+
+void xe_svm_close(struct xe_vm *vm);
+
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool atomic);
+
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end);
+
+int xe_svm_bo_evict(struct xe_bo *bo);
+
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+
+/**
+ * xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
+ * @range: SVM range
+ *
+ * Return: True if SVM range has a DMA mapping, False otherwise
+ */
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+ lockdep_assert_held(&range->base.gpusvm->notifier_lock);
+ return range->base.flags.has_dma_mapping;
+}
+
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+void xe_svm_flush(struct xe_vm *vm);
+
+#else
+#include <linux/interval_tree.h>
+
+struct drm_pagemap_device_addr;
+struct xe_bo;
+struct xe_gt;
+struct xe_vm;
+struct xe_vma;
+struct xe_tile;
+struct xe_vram_region;
+
+#define XE_INTERCONNECT_VRAM 1
+
+struct xe_svm_range {
+ struct {
+ struct interval_tree_node itree;
+ const struct drm_pagemap_device_addr *dma_addr;
+ } base;
+ u32 tile_present;
+ u32 tile_invalidated;
+};
+
+static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
+{
+ return false;
+}
+
+static inline
+int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
+{
+ return 0;
+}
+
+static inline
+int xe_svm_init(struct xe_vm *vm)
+{
+ return 0;
+}
+
+static inline
+void xe_svm_fini(struct xe_vm *vm)
+{
+}
+
+static inline
+void xe_svm_close(struct xe_vm *vm)
+{
+}
+
+static inline
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool atomic)
+{
+ return 0;
+}
+
+static inline
+bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
+{
+ return false;
+}
+
+static inline
+int xe_svm_bo_evict(struct xe_bo *bo)
+{
+ return 0;
+}
+
+static inline
+void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
+{
+}
+
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+#define xe_svm_range_has_dma_mapping(...) false
+
+static inline void xe_svm_notifier_lock(struct xe_vm *vm)
+{
+}
+
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
+{
+}
+
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
+#endif
+#endif
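With CONFIG_DRM_XE_GPUSVM disabled, the second half of the header supplies inline no-op stubs, so call sites need no #ifdefs. A small illustration under that assumption; example_close_svm_state() is illustrative only:

	static void example_close_svm_state(struct xe_vm *vm)
	{
		xe_svm_close(vm);	/* real teardown, or empty stub */
		xe_svm_flush(vm);	/* flush_work(), or no-op */
	}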
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 2e72c06fd40d..f87276df18f2 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -83,10 +83,16 @@ static void user_fence_worker(struct work_struct *w)
XE_WARN_ON("Copy to user failed");
kthread_unuse_mm(ufence->mm);
mmput(ufence->mm);
+ } else {
+ drm_dbg(&ufence->xe->drm, "mmget_not_zero() failed, ufence wasn't signaled\n");
}
- wake_up_all(&ufence->xe->ufence_wq);
+ /*
+ * Wake up waiters only after updating the ufence state, allowing the UMD
+ * to safely reuse the same ufence without encountering -EBUSY errors.
+ */
WRITE_ONCE(ufence->signalled, 1);
+ wake_up_all(&ufence->xe->ufence_wq);
user_fence_put(ufence);
}
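The reordering above publishes the signalled state before waking waiters. A hedged sketch of the waiter-side pattern this enables (the real wait loop lives elsewhere in the driver; the helper name and struct layout are assumed from the fields used above):

	static int example_wait_ufence(struct xe_user_fence *ufence)
	{
		/* A waiter woken after WRITE_ONCE(ufence->signalled, 1) cannot
		 * observe a stale 0, so reusing the ufence no longer races
		 * into -EBUSY.
		 */
		return wait_event_interruptible(ufence->xe->ufence_wq,
						READ_ONCE(ufence->signalled));
	}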
@@ -204,6 +210,7 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
return 0;
}
+ALLOW_ERROR_INJECTION(xe_sync_entry_parse, ERRNO);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
{
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index dda5268507d8..0771acbbf367 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/fault-inject.h>
+
#include <drm/drm_managed.h>
#include "xe_device.h"
@@ -11,6 +13,7 @@
#include "xe_migrate.h"
#include "xe_pcode.h"
#include "xe_sa.h"
+#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
@@ -92,10 +95,6 @@ static int xe_tile_alloc(struct xe_tile *tile)
return -ENOMEM;
tile->mem.ggtt->tile = tile;
- tile->mem.vram_mgr = drmm_kzalloc(drm, sizeof(*tile->mem.vram_mgr), GFP_KERNEL);
- if (!tile->mem.vram_mgr)
- return -ENOMEM;
-
return 0;
}
@@ -129,6 +128,7 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */
static int tile_ttm_mgr_init(struct xe_tile *tile)
{
@@ -136,7 +136,7 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
int err;
if (tile->mem.vram.usable_size) {
- err = xe_ttm_vram_mgr_init(tile, tile->mem.vram_mgr);
+ err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram.ttm);
if (err)
return err;
xe->info.mem_region_mask |= BIT(tile->id) << 1;
@@ -161,23 +161,29 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
*/
int xe_tile_init_noalloc(struct xe_tile *tile)
{
+ struct xe_device *xe = tile_to_xe(tile);
int err;
err = tile_ttm_mgr_init(tile);
if (err)
return err;
+ xe_wa_apply_tile_workarounds(tile);
+
+ if (xe->info.has_usm && IS_DGFX(xe))
+ xe_devm_add(tile, &tile->mem.vram);
+
+ return xe_tile_sysfs_init(tile);
+}
+
+int xe_tile_init(struct xe_tile *tile)
+{
tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16);
if (IS_ERR(tile->mem.kernel_bb_pool))
return PTR_ERR(tile->mem.kernel_bb_pool);
- xe_wa_apply_tile_workarounds(tile);
-
- err = xe_tile_sysfs_init(tile);
-
return 0;
}
-
void xe_tile_migrate_wait(struct xe_tile *tile)
{
xe_migrate_wait(tile->migrate);
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index 1c9e42ade6b0..eb939316d55b 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -12,6 +12,7 @@ struct xe_tile;
int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
int xe_tile_init_noalloc(struct xe_tile *tile);
+int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 8573d7a87d84..b4a3577df70c 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -21,6 +21,7 @@
#include "xe_vm.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
+#define __dev_name_tile(tile) __dev_name_xe(tile_to_xe((tile)))
#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
@@ -210,6 +211,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__string(dev, __dev_name_eq(job->q))
__field(u32, seqno)
__field(u32, lrc_seqno)
+ __field(u8, gt_id)
__field(u16, guc_id)
__field(u32, guc_state)
__field(u32, flags)
@@ -222,6 +224,7 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__assign_str(dev);
__entry->seqno = xe_sched_job_seqno(job);
__entry->lrc_seqno = xe_sched_job_lrc_seqno(job);
+ __entry->gt_id = job->q->gt->info.id;
__entry->guc_id = job->q->guc->id;
__entry->guc_state =
atomic_read(&job->q->guc->state);
@@ -231,9 +234,9 @@ DECLARE_EVENT_CLASS(xe_sched_job,
__entry->batch_addr = (u64)job->ptrs[0].batch_addr;
),
- TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
+ TP_printk("dev=%s, fence=%p, seqno=%u, lrc_seqno=%u, gt=%u, guc_id=%d, batch_addr=0x%012llx, guc_state=0x%x, flags=0x%x, error=%d",
__get_str(dev), __entry->fence, __entry->seqno,
- __entry->lrc_seqno, __entry->guc_id,
+ __entry->lrc_seqno, __entry->gt_id, __entry->guc_id,
__entry->batch_addr, __entry->guc_state,
__entry->flags, __entry->error)
);
@@ -281,6 +284,7 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__string(dev, __dev_name_eq(((struct xe_exec_queue *)msg->private_data)))
__field(u32, opcode)
__field(u16, guc_id)
+ __field(u8, gt_id)
),
TP_fast_assign(
@@ -288,9 +292,11 @@ DECLARE_EVENT_CLASS(xe_sched_msg,
__entry->opcode = msg->opcode;
__entry->guc_id =
((struct xe_exec_queue *)msg->private_data)->guc->id;
+ __entry->gt_id =
+ ((struct xe_exec_queue *)msg->private_data)->gt->info.id;
),
- TP_printk("dev=%s, guc_id=%d, opcode=%u", __get_str(dev), __entry->guc_id,
+ TP_printk("dev=%s, gt=%u, guc_id=%d, opcode=%u", __get_str(dev), __entry->gt_id, __entry->guc_id,
__entry->opcode)
);
@@ -342,12 +348,12 @@ DEFINE_EVENT(xe_hw_fence, xe_hw_fence_try_signal,
);
TRACE_EVENT(xe_reg_rw,
- TP_PROTO(struct xe_gt *gt, bool write, u32 reg, u64 val, int len),
+ TP_PROTO(struct xe_mmio *mmio, bool write, u32 reg, u64 val, int len),
- TP_ARGS(gt, write, reg, val, len),
+ TP_ARGS(mmio, write, reg, val, len),
TP_STRUCT__entry(
- __string(dev, __dev_name_gt(gt))
+ __string(dev, __dev_name_tile(mmio->tile))
__field(u64, val)
__field(u32, reg)
__field(u16, write)
@@ -421,6 +427,36 @@ DEFINE_EVENT(xe_pm_runtime, xe_pm_runtime_get_ioctl,
TP_ARGS(xe, caller)
);
+TRACE_EVENT(xe_eu_stall_data_read,
+ TP_PROTO(u8 slice, u8 subslice,
+ u32 read_ptr, u32 write_ptr,
+ size_t read_size, size_t total_size),
+ TP_ARGS(slice, subslice,
+ read_ptr, write_ptr,
+ read_size, total_size),
+
+ TP_STRUCT__entry(__field(u8, slice)
+ __field(u8, subslice)
+ __field(u32, read_ptr)
+ __field(u32, write_ptr)
+ __field(size_t, read_size)
+ __field(size_t, total_size)
+ ),
+
+ TP_fast_assign(__entry->slice = slice;
+ __entry->subslice = subslice;
+ __entry->read_ptr = read_ptr;
+ __entry->write_ptr = write_ptr;
+ __entry->read_size = read_size;
+ __entry->total_size = total_size;
+ ),
+
+ TP_printk("slice: %u subslice: %u read ptr: 0x%x write ptr: 0x%x read size: %zu total read size: %zu",
+ __entry->slice, __entry->subslice,
+ __entry->read_ptr, __entry->write_ptr,
+ __entry->read_size, __entry->total_size)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h
index 9b1a1d4304ae..ccebd5f0878e 100644
--- a/drivers/gpu/drm/xe/xe_trace_bo.h
+++ b/drivers/gpu/drm/xe/xe_trace_bo.h
@@ -48,6 +48,16 @@ DEFINE_EVENT(xe_bo, xe_bo_cpu_fault,
TP_ARGS(bo)
);
+DEFINE_EVENT(xe_bo, xe_bo_validate,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo)
+);
+
+DEFINE_EVENT(xe_bo, xe_bo_create,
+ TP_PROTO(struct xe_bo *bo),
+ TP_ARGS(bo)
+);
+
TRACE_EVENT(xe_bo_move,
TP_PROTO(struct xe_bo *bo, uint32_t new_placement, uint32_t old_placement,
bool move_lacks_source),
@@ -55,8 +65,8 @@ TRACE_EVENT(xe_bo_move,
TP_STRUCT__entry(
__field(struct xe_bo *, bo)
__field(size_t, size)
- __field(u32, new_placement)
- __field(u32, old_placement)
+ __string(new_placement_name, xe_mem_type_to_name[new_placement])
+ __string(old_placement_name, xe_mem_type_to_name[old_placement])
__string(device_id, __dev_name_bo(bo))
__field(bool, move_lacks_source)
),
@@ -64,15 +74,15 @@ TRACE_EVENT(xe_bo_move,
TP_fast_assign(
__entry->bo = bo;
__entry->size = bo->size;
- __entry->new_placement = new_placement;
- __entry->old_placement = old_placement;
+ __assign_str(new_placement_name);
+ __assign_str(old_placement_name);
__assign_str(device_id);
__entry->move_lacks_source = move_lacks_source;
),
TP_printk("move_lacks_source:%s, migrate object %p [size %zu] from %s to %s device_id:%s",
__entry->move_lacks_source ? "yes" : "no", __entry->bo, __entry->size,
- xe_mem_type_to_name[__entry->old_placement],
- xe_mem_type_to_name[__entry->new_placement], __get_str(device_id))
+ __get_str(old_placement_name),
+ __get_str(new_placement_name), __get_str(device_id))
);
DECLARE_EVENT_CLASS(xe_vma,
@@ -82,6 +92,7 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_STRUCT__entry(
__string(dev, __dev_name_vma(vma))
__field(struct xe_vma *, vma)
+ __field(struct xe_vm *, vm)
__field(u32, asid)
__field(u64, start)
__field(u64, end)
@@ -91,14 +102,16 @@ DECLARE_EVENT_CLASS(xe_vma,
TP_fast_assign(
__assign_str(dev);
__entry->vma = vma;
+ __entry->vm = xe_vma_vm(vma);
__entry->asid = xe_vma_vm(vma)->usm.asid;
__entry->start = xe_vma_start(vma);
__entry->end = xe_vma_end(vma) - 1;
__entry->ptr = xe_vma_userptr(vma);
),
- TP_printk("dev=%s, vma=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx,",
- __get_str(dev), __entry->vma, __entry->asid, __entry->start,
+ TP_printk("dev=%s, vma=%p, vm=%p, asid=0x%05x, start=0x%012llx, end=0x%012llx, userptr=0x%012llx",
+ __get_str(dev), __entry->vma, __entry->vm,
+ __entry->asid, __entry->start,
__entry->end, __entry->ptr)
)
@@ -180,16 +193,19 @@ DECLARE_EVENT_CLASS(xe_vm,
__string(dev, __dev_name_vm(vm))
__field(struct xe_vm *, vm)
__field(u32, asid)
+ __field(u32, flags)
),
TP_fast_assign(
__assign_str(dev);
__entry->vm = vm;
__entry->asid = vm->usm.asid;
+ __entry->flags = vm->flags;
),
- TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev),
- __entry->vm, __entry->asid)
+ TP_printk("dev=%s, vm=%p, asid=0x%05x, vm flags=0x%05x",
+ __get_str(dev), __entry->vm, __entry->asid,
+ __entry->flags)
);
DEFINE_EVENT(xe_vm, xe_vm_kill,
diff --git a/drivers/gpu/drm/xe/xe_trace_guc.h b/drivers/gpu/drm/xe/xe_trace_guc.h
index 23abdd55dc62..78949db9cfce 100644
--- a/drivers/gpu/drm/xe/xe_trace_guc.h
+++ b/drivers/gpu/drm/xe/xe_trace_guc.h
@@ -14,6 +14,7 @@
#include "xe_device_types.h"
#include "xe_guc_exec_queue_types.h"
+#include "xe_guc_engine_activity_types.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
@@ -100,6 +101,54 @@ DEFINE_EVENT_PRINT(xe_guc_ctb, xe_guc_ctb_g2h,
);
+TRACE_EVENT(xe_guc_engine_activity,
+ TP_PROTO(struct xe_device *xe, struct engine_activity *ea, const char *name,
+ u16 instance),
+ TP_ARGS(xe, ea, name, instance),
+
+ TP_STRUCT__entry(
+ __string(dev, __dev_name_xe(xe))
+ __string(name, name)
+ __field(u32, global_change_num)
+ __field(u32, guc_tsc_frequency_hz)
+ __field(u32, lag_latency_usec)
+ __field(u16, instance)
+ __field(u16, change_num)
+ __field(u16, quanta_ratio)
+ __field(u32, last_update_tick)
+ __field(u64, active_ticks)
+ __field(u64, active)
+ __field(u64, total)
+ __field(u64, quanta)
+ __field(u64, last_cpu_ts)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev);
+ __assign_str(name);
+ __entry->global_change_num = ea->metadata.global_change_num;
+ __entry->guc_tsc_frequency_hz = ea->metadata.guc_tsc_frequency_hz;
+ __entry->lag_latency_usec = ea->metadata.lag_latency_usec;
+ __entry->instance = instance;
+ __entry->change_num = ea->activity.change_num;
+ __entry->quanta_ratio = ea->activity.quanta_ratio;
+ __entry->last_update_tick = ea->activity.last_update_tick;
+ __entry->active_ticks = ea->activity.active_ticks;
+ __entry->active = ea->active;
+ __entry->total = ea->total;
+ __entry->quanta = ea->quanta;
+ __entry->last_cpu_ts = ea->last_cpu_ts;
+ ),
+
+ TP_printk("dev=%s engine %s:%d Active=%llu, quanta=%llu, last_cpu_ts=%llu\n"
+ "Activity metadata: global_change_num=%u, guc_tsc_frequency_hz=%u lag_latency_usec=%u\n"
+ "Activity data: change_num=%u, quanta_ratio=0x%x, last_update_tick=%u, active_ticks=%llu\n",
+ __get_str(dev), __get_str(name), __entry->instance,
+ (__entry->active + __entry->total), __entry->quanta, __entry->last_cpu_ts,
+ __entry->global_change_num, __entry->guc_tsc_frequency_hz,
+ __entry->lag_latency_usec, __entry->change_num, __entry->quanta_ratio,
+ __entry->last_update_tick, __entry->active_ticks)
+);
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.c b/drivers/gpu/drm/xe/xe_trace_lrc.c
new file mode 100644
index 000000000000..ab9b7e2970bc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "xe_trace_lrc.h"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace_lrc.h b/drivers/gpu/drm/xe/xe_trace_lrc.h
new file mode 100644
index 000000000000..d525cbee1e34
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_trace_lrc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xe
+
+#if !defined(_XE_TRACE_LRC_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _XE_TRACE_LRC_H_
+
+#include <linux/tracepoint.h>
+#include <linux/types.h>
+
+#include "xe_gt_types.h"
+#include "xe_lrc.h"
+#include "xe_lrc_types.h"
+
+#define __dev_name_lrc(lrc) dev_name(gt_to_xe((lrc)->fence_ctx.gt)->drm.dev)
+
+TRACE_EVENT(xe_lrc_update_timestamp,
+ TP_PROTO(struct xe_lrc *lrc, uint64_t old),
+ TP_ARGS(lrc, old),
+ TP_STRUCT__entry(
+ __field(struct xe_lrc *, lrc)
+ __field(u64, old)
+ __field(u64, new)
+ __string(name, lrc->fence_ctx.name)
+ __string(device_id, __dev_name_lrc(lrc))
+ ),
+
+ TP_fast_assign(
+ __entry->lrc = lrc;
+ __entry->old = old;
+ __entry->new = lrc->ctx_timestamp;
+ __assign_str(name);
+ __assign_str(device_id);
+ ),
+ TP_printk("lrc=%p lrc->name=%s old=%llu new=%llu device_id:%s",
+ __entry->lrc, __get_str(name),
+ __entry->old, __entry->new,
+ __get_str(device_id))
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/xe
+#define TRACE_INCLUDE_FILE xe_trace_lrc
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index f7113cf6109d..d9c9d2547aad 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -57,12 +57,35 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
}
+static u32 get_wopcm_size(struct xe_device *xe)
+{
+ u32 wopcm_size;
+ u64 val;
+
+ val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
+ val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
+
+ switch (val) {
+ case 0x5 ... 0x6:
+ val--;
+ fallthrough;
+ case 0x0 ... 0x3:
+ wopcm_size = (1U << val) * SZ_1M;
+ break;
+ default:
+ WARN(1, "Missing case wopcm_size=%llx\n", val);
+ wopcm_size = 0;
+ }
+
+ return wopcm_size;
+}
+
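For reference, the decode above works out to (1U << val) MiB after the adjustment: encodings 0x0-0x3 give 1, 2, 4 and 8 MiB, and 0x5-0x6 are decremented first and give 16 and 32 MiB; any other value triggers the WARN and returns 0, which the callers below treat as an error.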
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
- struct xe_gt *mmio = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- u64 stolen_size;
+ u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
@@ -74,7 +97,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
return 0;
+ /* Carve out the top of DSM as it contains the reserved WOPCM region */
+ wopcm_size = get_wopcm_size(xe);
+ if (drm_WARN_ON(&xe->drm, !wopcm_size))
+ return 0;
+
stolen_size = tile_size - mgr->stolen_base;
+ stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
@@ -89,29 +118,6 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
return ALIGN_DOWN(stolen_size, SZ_1M);
}
-static u32 get_wopcm_size(struct xe_device *xe)
-{
- u32 wopcm_size;
- u64 val;
-
- val = xe_mmio_read64_2x32(xe_root_mmio_gt(xe), STOLEN_RESERVED);
- val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
-
- switch (val) {
- case 0x5 ... 0x6:
- val--;
- fallthrough;
- case 0x0 ... 0x3:
- wopcm_size = (1U << val) * SZ_1M;
- break;
- default:
- WARN(1, "Missing case wopcm_size=%llx\n", val);
- wopcm_size = 0;
- }
-
- return wopcm_size;
-}
-
static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -119,7 +125,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
u32 stolen_size, wopcm_size;
u32 ggc, gms;
- ggc = xe_mmio_read32(xe_root_mmio_gt(xe), GGC);
+ ggc = xe_mmio_read32(xe_root_tile_mmio(xe), GGC);
/*
* Check GGMS: it should be fixed 0x3 (8MB), which corresponds to the
@@ -159,7 +165,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
stolen_size -= wopcm_size;
if (media_gt && XE_WA(media_gt, 14019821291)) {
- u64 gscpsmi_base = xe_mmio_read64_2x32(media_gt, GSCPSMI_BASE)
+ u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE)
& ~GENMASK_ULL(5, 0);
/*
@@ -201,17 +207,16 @@ static u64 detect_stolen(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
#endif
}
-void xe_ttm_stolen_mgr_init(struct xe_device *xe)
+int xe_ttm_stolen_mgr_init(struct xe_device *xe)
{
- struct xe_ttm_stolen_mgr *mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct xe_ttm_stolen_mgr *mgr;
u64 stolen_size, io_size;
int err;
- if (!mgr) {
- drm_dbg_kms(&xe->drm, "Stolen mgr init failed\n");
- return;
- }
+ mgr = drmm_kzalloc(&xe->drm, sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return -ENOMEM;
if (IS_SRIOV_VF(xe))
stolen_size = 0;
@@ -224,7 +229,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (!stolen_size) {
drm_dbg_kms(&xe->drm, "No stolen memory support\n");
- return;
+ return 0;
}
/*
@@ -240,7 +245,7 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
io_size, PAGE_SIZE);
if (err) {
drm_dbg_kms(&xe->drm, "Stolen mgr init failed: %i\n", err);
- return;
+ return err;
}
drm_dbg_kms(&xe->drm, "Initialized stolen memory support with %llu bytes\n",
@@ -248,6 +253,8 @@ void xe_ttm_stolen_mgr_init(struct xe_device *xe)
if (io_size)
mgr->mapping = devm_ioremap_wc(&pdev->dev, mgr->io_base, io_size);
+
+ return 0;
}
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset)
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
index 1777245ff810..8e877d1e839b 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.h
@@ -12,7 +12,7 @@ struct ttm_resource;
struct xe_bo;
struct xe_device;
-void xe_ttm_stolen_mgr_init(struct xe_device *xe);
+int xe_ttm_stolen_mgr_init(struct xe_device *xe);
int xe_ttm_stolen_io_mem_reserve(struct xe_device *xe, struct ttm_resource *mem);
bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe);
u64 xe_ttm_stolen_io_offset(struct xe_bo *bo, u32 offset);
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
index 9844a8edbfe1..d38b91872da3 100644
--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -108,9 +108,8 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe)
u64 gtt_size;
si_meminfo(&si);
+ /* Potentially restrict amount of TT memory here. */
gtt_size = (u64)si.totalram * si.mem_unit;
- /* TTM limits allocation of all TTM devices by 50% of system memory */
- gtt_size /= 2;
man->use_tt = true;
man->func = &xe_ttm_sys_mgr_func;
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 423b261ea743..9e375a40aee9 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/drm_drv.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
@@ -52,7 +53,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man);
struct xe_ttm_vram_mgr_resource *vres;
struct drm_buddy *mm = &mgr->mm;
- u64 size, remaining_size, min_page_size;
+ u64 size, min_page_size;
unsigned long lpfn;
int err;
@@ -98,17 +99,6 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
goto error_fini;
}
- if (WARN_ON(min_page_size > SZ_2G)) { /* FIXME: sg limit */
- err = -EINVAL;
- goto error_fini;
- }
-
- if (WARN_ON((size > SZ_2G &&
- (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS)))) {
- err = -EINVAL;
- goto error_fini;
- }
-
if (WARN_ON(!IS_ALIGNED(size, min_page_size))) {
err = -EINVAL;
goto error_fini;
@@ -116,12 +106,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
mutex_lock(&mgr->lock);
if (lpfn <= mgr->visible_size >> PAGE_SHIFT && size > mgr->visible_avail) {
- mutex_unlock(&mgr->lock);
err = -ENOSPC;
- goto error_fini;
+ goto error_unlock;
}
- if (place->fpfn + (size >> PAGE_SHIFT) != place->lpfn &&
+ if (place->fpfn + (size >> PAGE_SHIFT) != lpfn &&
place->flags & TTM_PL_FLAG_CONTIGUOUS) {
size = roundup_pow_of_two(size);
min_page_size = size;
@@ -129,25 +118,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn);
}
- remaining_size = size;
- do {
- /*
- * Limit maximum size to 2GiB due to SG table limitations.
- * FIXME: Should maybe be handled as part of sg construction.
- */
- u64 alloc_size = min_t(u64, remaining_size, SZ_2G);
-
- err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
- (u64)lpfn << PAGE_SHIFT,
- alloc_size,
- min_page_size,
- &vres->blocks,
- vres->flags);
- if (err)
- goto error_free_blocks;
-
- remaining_size -= alloc_size;
- } while (remaining_size);
+ err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
+ (u64)lpfn << PAGE_SHIFT, size,
+ min_page_size, &vres->blocks, vres->flags);
+ if (err)
+ goto error_unlock;
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks))
@@ -194,9 +169,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
*res = &vres->base;
return 0;
-
-error_free_blocks:
- drm_buddy_free_list(mm, &vres->blocks, 0);
+error_unlock:
mutex_unlock(&mgr->lock);
error_fini:
ttm_resource_fini(man, &vres->base);
@@ -339,6 +312,13 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
struct ttm_resource_manager *man = &mgr->manager;
int err;
+ if (mem_type != XE_PL_STOLEN) {
+ const char *name = mem_type == XE_PL_VRAM0 ? "vram0" : "vram1";
+ man->cg = drmm_cgroup_register_region(&xe->drm, name, size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
+ }
+
man->func = &xe_ttm_vram_mgr_func;
mgr->mem_type = mem_type;
mutex_init(&mgr->lock);
@@ -360,9 +340,8 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
{
struct xe_device *xe = tile_to_xe(tile);
- struct xe_mem_region *vram = &tile->mem.vram;
+ struct xe_vram_region *vram = &tile->mem.vram;
- mgr->vram = vram;
return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
vram->usable_size, vram->io_size,
PAGE_SIZE);
@@ -393,7 +372,8 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
while (cursor.remaining) {
num_entries++;
- xe_res_next(&cursor, cursor.size);
+ /* Limit maximum size to 2GiB due to SG table limitations. */
+ xe_res_next(&cursor, min_t(u64, cursor.size, SZ_2G));
}
r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
@@ -413,7 +393,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
- size_t size = cursor.size;
+ size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
addr = dma_map_resource(dev, phys, size, dir,
@@ -426,7 +406,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
- xe_res_next(&cursor, cursor.size);
+ xe_res_next(&cursor, size);
}
return 0;
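A worked example of the 2 GiB clamp above (sizes hypothetical): a contiguous 5 GiB VRAM resource now yields three sg entries of 2 GiB, 2 GiB and 1 GiB at map time, which is what allows the per-allocation 2 GiB splitting loop in xe_ttm_vram_mgr_new() to be dropped earlier in this file.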
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
index 2d75cf126289..1144f9232ebb 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
@@ -9,8 +9,6 @@
#include <drm/drm_buddy.h>
#include <drm/ttm/ttm_device.h>
-struct xe_mem_region;
-
/**
* struct xe_ttm_vram_mgr - XE TTM VRAM manager
*
@@ -21,8 +19,6 @@ struct xe_ttm_vram_mgr {
struct ttm_resource_manager manager;
/** @mm: DRM buddy allocator which manages the VRAM */
struct drm_buddy mm;
- /** @vram: ptr to details of associated VRAM region */
- struct xe_mem_region *vram;
/** @visible_size: Probed size of the CPU visible portion */
u64 visible_size;
/** @visible_avail: CPU visible portion still unallocated */
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 0d5e04158917..49ddbda7cdef 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -7,6 +7,8 @@
#include <kunit/visibility.h>
+#include <drm/drm_managed.h>
+
#include "regs/xe_gt_regs.h"
#include "xe_gt_types.h"
#include "xe_platform_types.h"
@@ -33,7 +35,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
{ XE_RTP_NAME("Tuning: L3 cache - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(FIELD_SET(XE2LPM_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
@@ -43,7 +45,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
SET(CCCHKNREG1, L3CMPCTRL))
},
{ XE_RTP_NAME("Tuning: Compression Overfetch - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(CLR(XE2LPM_CCCHKNREG1, ENCOMPPERFFIX),
SET(XE2LPM_CCCHKNREG1, L3CMPCTRL))
},
@@ -52,7 +54,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_ACTIONS(SET(L3SQCREG3, COMPPWOVERFETCHEN))
},
{ XE_RTP_NAME("Tuning: Enable compressible partial write overfetch in L3 - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
@@ -61,7 +63,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
COMPMEMRD256BOVRFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"),
- XE_RTP_RULES(MEDIA_VERSION(2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2,
COMPMEMRD256BOVRFETCHEN))
},
@@ -71,7 +73,7 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
},
{ XE_RTP_NAME("Tuning: Stateless compression control - media"),
- XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 2000)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, XE_RTP_END_VERSION_UNDEFINED)),
XE_RTP_ACTIONS(FIELD_SET(STATELESS_COMPRESSION_CTRL, UNIFIED_COMPRESSION_FORMAT,
REG_FIELD_PREP(UNIFIED_COMPRESSION_FORMAT, 0)))
},
@@ -83,28 +85,22 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_RULES(MEDIA_VERSION(2000)),
XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN))
},
-
- {}
};
static const struct xe_rtp_entry_sr engine_tunings[] = {
+ { XE_RTP_NAME("Tuning: L3 Hashing Mask"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(CLR(XELP_GARBCNTL, XELP_BUS_HASH_CTL_BIT_EXC))
+ },
{ XE_RTP_NAME("Tuning: Set Indirect State Override"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1274),
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
- {}
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
- { XE_RTP_NAME("Tuning: ganged timer, also known as 16011163337"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
- /* read verification is ignored due to 1608008084. */
- XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
- FF_MODE2_GS_TIMER_MASK,
- FF_MODE2_GS_TIMER_224))
- },
-
/* DG2 */
{ XE_RTP_NAME("Tuning: L3 cache"),
@@ -139,15 +135,47 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = {
XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK,
REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f)))
},
-
- {}
};
+/**
+ * xe_tuning_init - initialize gt with tunings bookkeeping
+ * @gt: GT instance to initialize
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_tuning_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ size_t n_lrc, n_engine, n_gt, total;
+ unsigned long *p;
+
+ n_gt = BITS_TO_LONGS(ARRAY_SIZE(gt_tunings));
+ n_engine = BITS_TO_LONGS(ARRAY_SIZE(engine_tunings));
+ n_lrc = BITS_TO_LONGS(ARRAY_SIZE(lrc_tunings));
+ total = n_gt + n_engine + n_lrc;
+
+ p = drmm_kzalloc(&xe->drm, sizeof(*p) * total, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ gt->tuning_active.gt = p;
+ p += n_gt;
+ gt->tuning_active.engine = p;
+ p += n_engine;
+ gt->tuning_active.lrc = p;
+
+ return 0;
+}
+ALLOW_ERROR_INJECTION(xe_tuning_init, ERRNO); /* See xe_pci_probe() */
+
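A worked example of the carve-up in xe_tuning_init() (entry counts hypothetical): with 40 gt_tunings, 10 engine_tunings and 12 lrc_tunings on a 64-bit build, BITS_TO_LONGS() yields one long per table, so the single three-long drmm allocation is split as p[0] for gt, p[1] for engine and p[2] for lrc active-tracking bitmaps.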
void xe_tuning_process_gt(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
- xe_rtp_process_to_sr(&ctx, gt_tunings, &gt->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ gt->tuning_active.gt,
+ ARRAY_SIZE(gt_tunings));
+ xe_rtp_process_to_sr(&ctx, gt_tunings, ARRAY_SIZE(gt_tunings), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt);
@@ -155,7 +183,11 @@ void xe_tuning_process_engine(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, engine_tunings, &hwe->reg_sr);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.engine,
+ ARRAY_SIZE(engine_tunings));
+ xe_rtp_process_to_sr(&ctx, engine_tunings, ARRAY_SIZE(engine_tunings),
+ &hwe->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_engine);
@@ -171,5 +203,25 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
- xe_rtp_process_to_sr(&ctx, lrc_tunings, &hwe->reg_lrc);
+ xe_rtp_process_ctx_enable_active_tracking(&ctx,
+ hwe->gt->tuning_active.lrc,
+ ARRAY_SIZE(lrc_tunings));
+ xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc);
+}
+
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ size_t idx;
+
+ drm_printf(p, "GT Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings))
+ drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name);
+
+ drm_printf(p, "\nEngine Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings))
+ drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name);
+
+ drm_printf(p, "\nLRC Tunings\n");
+ for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings))
+ drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name);
}
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
index 4f9c3ac3b516..dd0d3ccc9c65 100644
--- a/drivers/gpu/drm/xe/xe_tuning.h
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -6,11 +6,14 @@
#ifndef _XE_TUNING_
#define _XE_TUNING_
+struct drm_printer;
struct xe_gt;
struct xe_hw_engine;
+int xe_tuning_init(struct xe_gt *gt);
void xe_tuning_process_gt(struct xe_gt *gt);
void xe_tuning_process_engine(struct xe_hw_engine *hwe);
void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
+void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c
index 0d073a9987c2..3a8751a8b92d 100644
--- a/drivers/gpu/drm/xe/xe_uc.c
+++ b/drivers/gpu/drm/xe/xe_uc.c
@@ -14,6 +14,7 @@
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_pc.h"
+#include "xe_guc_engine_activity.h"
#include "xe_huc.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"
@@ -210,6 +211,8 @@ int xe_uc_init_hw(struct xe_uc *uc)
if (ret)
return ret;
+ xe_guc_engine_activity_enable_stats(&uc->guc);
+
/* We don't fail the driver load if HuC fails to auth, but let's warn */
ret = xe_huc_auth(&uc->huc, XE_HUC_AUTH_VIA_GUC);
xe_gt_assert(uc_to_gt(uc), !ret);
@@ -241,7 +244,7 @@ void xe_uc_gucrc_disable(struct xe_uc *uc)
void xe_uc_stop_prepare(struct xe_uc *uc)
{
- xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_gsc_stop_prepare(&uc->gsc);
xe_guc_stop_prepare(&uc->guc);
}
@@ -275,6 +278,12 @@ again:
goto again;
}
+void xe_uc_suspend_prepare(struct xe_uc *uc)
+{
+ xe_gsc_wait_for_worker_completion(&uc->gsc);
+ xe_guc_stop_prepare(&uc->guc);
+}
+
int xe_uc_suspend(struct xe_uc *uc)
{
/* GuC submission not enabled, nothing to do */
@@ -289,19 +298,6 @@ int xe_uc_suspend(struct xe_uc *uc)
}
/**
- * xe_uc_remove() - Clean up the UC structures before driver removal
- * @uc: the UC object
- *
- * This function should only act on objects/structures that must be cleaned
- * before the driver removal callback is complete and therefore can't be
- * deferred to a drmm action.
- */
-void xe_uc_remove(struct xe_uc *uc)
-{
- xe_gsc_remove(&uc->gsc);
-}
-
-/**
* xe_uc_declare_wedged() - Declare UC wedged
* @uc: the UC object
*
diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h
index 506517c11333..c23e6f5e2514 100644
--- a/drivers/gpu/drm/xe/xe_uc.h
+++ b/drivers/gpu/drm/xe/xe_uc.h
@@ -18,9 +18,9 @@ int xe_uc_reset_prepare(struct xe_uc *uc);
void xe_uc_stop_prepare(struct xe_uc *uc);
void xe_uc_stop(struct xe_uc *uc);
int xe_uc_start(struct xe_uc *uc);
+void xe_uc_suspend_prepare(struct xe_uc *uc);
int xe_uc_suspend(struct xe_uc *uc);
int xe_uc_sanitize_reset(struct xe_uc *uc);
-void xe_uc_remove(struct xe_uc *uc);
void xe_uc_declare_wedged(struct xe_uc *uc);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index d431d0031185..2741849bbf4d 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/fault-inject.h>
#include <linux/firmware.h>
#include <drm/drm_managed.h>
@@ -91,6 +92,8 @@
struct uc_fw_entry {
enum xe_platform platform;
+ enum xe_gt_type gt_type;
+
struct {
const char *path;
u16 major;
@@ -105,32 +108,37 @@ struct fw_blobs_by_type {
u32 count;
};
-#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \
- fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \
- fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \
- fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \
- fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \
- fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \
- fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \
- fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2))
+/*
+ * Add an "ANY" define just to convey the meaning it's given here.
+ */
+#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
+
+#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
+ fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \
+ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \
+ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1))
#define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \
- fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \
- fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \
- fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \
- fw_def(DG1, no_ver(i915, huc, dg1)) \
- fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \
- fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \
- fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \
- fw_def(TIGERLAKE, no_ver(i915, huc, tgl))
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \
+ fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \
+ fw_def(DG1, GT_TYPE_ANY, no_ver(i915, huc, dg1)) \
+ fw_def(ALDERLAKE_P, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ALDERLAKE_S, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(ROCKETLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \
+ fw_def(TIGERLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl))
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
+ fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@@ -158,12 +166,13 @@ struct fw_blobs_by_type {
a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
-#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
+#define XE_UC_MODULE_FIRMWARE(platform__, gt_type__, fw_filename) \
MODULE_FIRMWARE(fw_filename);
-#define XE_UC_FW_ENTRY(platform__, entry__) \
+#define XE_UC_FW_ENTRY(platform__, gt_type__, entry__) \
{ \
.platform = XE_ ## platform__, \
+ .gt_type = XE_ ## gt_type__, \
entry__, \
},
@@ -221,30 +230,38 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
[XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) },
[XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) },
};
- static const struct uc_fw_entry *entries;
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
enum xe_platform p = xe->info.platform;
+ const struct uc_fw_entry *entries;
u32 count;
int i;
- xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, uc_fw->type < ARRAY_SIZE(blobs_all));
+ xe_gt_assert(gt, gt->info.type != XE_GT_TYPE_UNINITIALIZED);
+
entries = blobs_all[uc_fw->type].entries;
count = blobs_all[uc_fw->type].count;
for (i = 0; i < count && p <= entries[i].platform; i++) {
- if (p == entries[i].platform) {
- uc_fw->path = entries[i].path;
- uc_fw->versions.wanted.major = entries[i].major;
- uc_fw->versions.wanted.minor = entries[i].minor;
- uc_fw->versions.wanted.patch = entries[i].patch;
- uc_fw->full_ver_required = entries[i].full_ver_required;
-
- if (uc_fw->type == XE_UC_FW_TYPE_GSC)
- uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
- else
- uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
-
- break;
- }
+ if (p != entries[i].platform)
+ continue;
+
+ if (entries[i].gt_type != XE_GT_TYPE_ANY &&
+ entries[i].gt_type != gt->info.type)
+ continue;
+
+ uc_fw->path = entries[i].path;
+ uc_fw->versions.wanted.major = entries[i].major;
+ uc_fw->versions.wanted.minor = entries[i].minor;
+ uc_fw->versions.wanted.patch = entries[i].patch;
+ uc_fw->full_ver_required = entries[i].full_ver_required;
+
+ if (uc_fw->type == XE_UC_FW_TYPE_GSC)
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ else
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE;
+
+ break;
}
}
@@ -796,6 +813,7 @@ int xe_uc_fw_init(struct xe_uc_fw *uc_fw)
return err;
}
+ALLOW_ERROR_INJECTION(xe_uc_fw_init, ERRNO); /* See xe_pci_probe() */
static u32 uc_fw_ggtt_offset(struct xe_uc_fw *uc_fw)
{
@@ -806,6 +824,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct xe_mmio *mmio = &gt->mmio;
u64 src_offset;
u32 dma_ctrl;
int ret;
@@ -814,34 +833,34 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
/* Set the source address for the uCode */
src_offset = uc_fw_ggtt_offset(uc_fw) + uc_fw->css_offset;
- xe_mmio_write32(gt, DMA_ADDR_0_LOW, lower_32_bits(src_offset));
- xe_mmio_write32(gt, DMA_ADDR_0_HIGH,
+ xe_mmio_write32(mmio, DMA_ADDR_0_LOW, lower_32_bits(src_offset));
+ xe_mmio_write32(mmio, DMA_ADDR_0_HIGH,
upper_32_bits(src_offset) | DMA_ADDRESS_SPACE_GGTT);
/* Set the DMA destination */
- xe_mmio_write32(gt, DMA_ADDR_1_LOW, offset);
- xe_mmio_write32(gt, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+ xe_mmio_write32(mmio, DMA_ADDR_1_LOW, offset);
+ xe_mmio_write32(mmio, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/*
* Set the transfer size. The header plus uCode will be copied to WOPCM
* via DMA, excluding any other components
*/
- xe_mmio_write32(gt, DMA_COPY_SIZE,
+ xe_mmio_write32(mmio, DMA_COPY_SIZE,
sizeof(struct uc_css_header) + uc_fw->ucode_size);
/* Start the DMA */
- xe_mmio_write32(gt, DMA_CTRL,
+ xe_mmio_write32(mmio, DMA_CTRL,
_MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
- ret = xe_mmio_wait32(gt, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
+ ret = xe_mmio_wait32(mmio, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
false);
if (ret)
drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */
- xe_mmio_write32(gt, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 0d8caa0e7354..ad3b35a0e6eb 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -92,7 +92,7 @@ struct xe_uc_fw {
const enum xe_uc_fw_status status;
/**
* @__status: private firmware load status - only to be used
- * by firmware laoding code
+ * by firmware loading code
*/
enum xe_uc_fw_status __status;
};
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index c99380271de6..861577746929 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -8,9 +8,9 @@
#include <linux/dma-fence-array.h>
#include <linux/nospec.h>
+#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_print.h>
-#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
@@ -34,7 +34,9 @@
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_pt.h"
+#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_svm.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
@@ -270,6 +272,7 @@ out_up_write:
return err;
}
+ALLOW_ERROR_INJECTION(xe_vm_add_compute_exec_queue, ERRNO);
/**
* xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM
@@ -580,51 +583,26 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
- struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
- struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_userptr *userptr = &uvma->userptr;
struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
long err;
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- /* No need to stop gpu access if the userptr is not yet bound. */
- if (!userptr->initial_bind) {
- up_write(&vm->userptr.notifier_lock);
- return true;
- }
-
/*
* Tell exec and rebind worker they need to repin and rebind this
* userptr.
*/
if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
spin_lock(&vm->userptr.invalidated_lock);
list_move_tail(&userptr->invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
}
- up_write(&vm->userptr.notifier_lock);
-
/*
* Preempt fences turn into schedule disables, pipeline these.
* Note that even in fault mode, we need to wait for binds and
@@ -642,11 +620,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
err = xe_vm_invalidate_vma(vma);
XE_WARN_ON(err);
}
+ xe_hmm_userptr_unmap(uvma);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->userptr.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->userptr.notifier_lock);
trace_xe_vma_userptr_invalidate_complete(vma);
return true;
@@ -656,31 +660,71 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
.invalidate = vma_userptr_invalidate,
};
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq))
+ uvma->userptr.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
int xe_vm_userptr_pin(struct xe_vm *vm)
{
struct xe_userptr_vma *uvma, *next;
int err = 0;
- LIST_HEAD(tmp_evict);
xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
lockdep_assert_held_write(&vm->lock);
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&uvma->userptr.invalidate_link);
- list_move_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
- /* Pin and move to temporary list */
+ /* Pin and move to bind list */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
if (err == -EFAULT) {
list_del_init(&uvma->userptr.repin_link);
+ /*
+ * We might have already done the pin once, but then had
+ * to retry before the re-bind happened, due to some other
+ * condition in the caller, but in the meantime the userptr
+ * got dinged by the notifier such that we need to
+ * revalidate here, but this time we hit the EFAULT. In
+ * such a case make sure we remove ourselves from the
+ * rebind list to avoid going down in flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
/* Wait for pending binds */
xe_vm_lock(vm, false);
@@ -691,10 +735,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
err = xe_vm_invalidate_vma(&uvma->vma);
xe_vm_unlock(vm);
if (err)
- return err;
+ break;
} else {
- if (err < 0)
- return err;
+ if (err)
+ break;
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->vma.combined_links.rebind,
@@ -702,7 +746,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
}
}
- return 0;
+ if (err) {
+ down_write(&vm->userptr.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->userptr.notifier_lock);
+ }
+ return err;
}
/**
@@ -733,13 +789,14 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
vops->pt_update_ops[i].ops =
kmalloc_array(vops->pt_update_ops[i].num_ops,
sizeof(*vops->pt_update_ops[i].ops),
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!vops->pt_update_ops[i].ops)
return array_of_binds ? -ENOBUFS : -ENOMEM;
}
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
@@ -893,6 +950,179 @@ free_ops:
return fence;
}
+static void xe_vm_populate_range_rebind(struct xe_vma_op *op,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->tile_mask = tile_mask;
+ op->base.op = DRM_GPUVA_OP_DRIVER;
+ op->subop = XE_VMA_SUBOP_MAP_RANGE;
+ op->map_range.vma = vma;
+ op->map_range.range = range;
+}
+
+static int
+xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ xe_vm_populate_range_rebind(op, vma, range, tile_mask);
+ list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+
+ return 0;
+}
+
+/**
+ * xe_vm_range_rebind() - VM range (re)bind
+ * @vm: The VM which the range belongs to.
+ * @vma: The VMA which the range belongs to.
+ * @range: SVM range to rebind.
+ * @tile_mask: Tile mask to bind the range to.
+ *
+ * (Re)bind an SVM range, setting up GPU page tables for the range.
+ *
+ * Return: dma fence for rebind to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask)
+{
+ struct dma_fence *fence = NULL;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
+ int err;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
+
+ err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
+ if (err)
+ return ERR_PTR(err);
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
+ fence = ops_execute(vm, &vops);
+
+free_ops:
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
+ }
+ xe_vma_ops_fini(&vops);
+
+ return fence;
+}
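As a usage illustration only (the actual callers live in the SVM fault-handling code, which is not part of this hunk), a fault handler that has already located the CPU-address-mirror VMA and faulted-in SVM range, and that holds vm->lock and the vm dma-resv, could submit the rebind and wait on it roughly like this:

	/* Illustrative sketch: bind a faulted-in SVM range and wait. */
	static int fault_rebind_example(struct xe_vm *vm, struct xe_vma *vma,
					struct xe_svm_range *range, u8 tile_mask)
	{
		struct dma_fence *fence;

		fence = xe_vm_range_rebind(vm, vma, range, tile_mask);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		dma_fence_wait(fence, false);
		dma_fence_put(fence);

		return 0;
	}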
+
+static void xe_vm_populate_range_unbind(struct xe_vma_op *op,
+ struct xe_svm_range *range)
+{
+ INIT_LIST_HEAD(&op->link);
+ op->tile_mask = range->tile_present;
+ op->base.op = DRM_GPUVA_OP_DRIVER;
+ op->subop = XE_VMA_SUBOP_UNMAP_RANGE;
+ op->unmap_range.range = range;
+}
+
+static int
+xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
+ struct xe_svm_range *range)
+{
+ struct xe_vma_op *op;
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
+ xe_vm_populate_range_unbind(op, range);
+ list_add_tail(&op->link, &vops->list);
+ xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
+
+ return 0;
+}
+
+/**
+ * xe_vm_range_unbind() - VM range unbind
+ * @vm: The VM which the range belongs to.
+ * @range: SVM range to unbind.
+ *
+ * Unbind an SVM range, removing the GPU page tables for the range.
+ *
+ * Return: dma fence for unbind to signal completion on success, ERR_PTR on
+ * failure
+ */
+struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
+ struct xe_svm_range *range)
+{
+ struct dma_fence *fence = NULL;
+ struct xe_vma_ops vops;
+ struct xe_vma_op *op, *next_op;
+ struct xe_tile *tile;
+ u8 id;
+ int err;
+
+ lockdep_assert_held(&vm->lock);
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
+
+ if (!range->tile_present)
+ return dma_fence_get_stub();
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ for_each_tile(tile, vm->xe, id) {
+ vops.pt_update_ops[id].wait_vm_bookkeep = true;
+ vops.pt_update_ops[tile->id].q =
+ xe_tile_migrate_exec_queue(tile);
+ }
+
+ err = xe_vm_ops_add_range_unbind(&vops, range);
+ if (err)
+ return ERR_PTR(err);
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err) {
+ fence = ERR_PTR(err);
+ goto free_ops;
+ }
+
+ fence = ops_execute(vm, &vops);
+
+free_ops:
+ list_for_each_entry_safe(op, next_op, &vops.list, link) {
+ list_del(&op->link);
+ kfree(op);
+ }
+ xe_vma_ops_fini(&vops);
+
+ return fence;
+}
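Similarly, a hypothetical caller on the teardown side (for instance the SVM garbage collector introduced in xe_vm_types.h below) could unbind a range and wait for the page-table update to complete; note that a stub fence is returned when the range is not bound on any tile:

	/* Illustrative sketch: remove the GPU bindings of an SVM range. */
	static int range_unbind_example(struct xe_vm *vm,
					struct xe_svm_range *range)
	{
		struct dma_fence *fence = xe_vm_range_unbind(vm, range);

		if (IS_ERR(fence))
			return PTR_ERR(fence);

		dma_fence_wait(fence, false);
		dma_fence_put(fence);

		return 0;
	}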
+
static void xe_vma_free(struct xe_vma *vma)
{
if (xe_vma_is_userptr(vma))
@@ -901,9 +1131,10 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
-#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
+#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
+#define VMA_CREATE_FLAG_IS_NULL BIT(1)
+#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
+#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
@@ -917,6 +1148,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
+ bool is_cpu_addr_mirror =
+ (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -925,7 +1158,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
* Allocate and ensure that the xe_vma_is_userptr() return
* matches what was allocated.
*/
- if (!bo && !is_null) {
+ if (!bo && !is_null && !is_cpu_addr_mirror) {
struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
if (!uvma)
@@ -937,6 +1170,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
+ if (is_cpu_addr_mirror)
+ vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
if (is_null)
vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
@@ -979,7 +1214,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
drm_gpuva_link(&vma->gpuva, vm_bo);
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
- if (!is_null) {
+ if (!is_null && !is_cpu_addr_mirror) {
struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
u64 size = end - start + 1;
int err;
@@ -987,6 +1222,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
INIT_LIST_HEAD(&userptr->invalidate_link);
INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
+ mutex_init(&userptr->unmap_mutex);
err = mmu_interval_notifier_insert(&userptr->notifier,
current->mm,
@@ -1024,12 +1260,13 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
/*
* Since userptr pages are not pinned, we can't remove
- * the notifer until we're sure the GPU is not accessing
+ * the notifier until we're sure the GPU is not accessing
* them anymore
*/
mmu_interval_notifier_remove(&userptr->notifier);
+ mutex_destroy(&userptr->unmap_mutex);
xe_vm_put(vm);
- } else if (xe_vma_is_null(vma)) {
+ } else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
xe_vm_put(vm);
} else {
xe_bo_put(xe_vma_bo(vma));
@@ -1066,9 +1303,10 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
- } else if (!xe_vma_is_null(vma)) {
+ } else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
drm_gpuva_unlink(&vma->gpuva);
@@ -1352,6 +1590,7 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
return 0;
}
+ALLOW_ERROR_INJECTION(xe_vm_create_scratch, ERRNO);
static void xe_vm_free_scratch(struct xe_vm *vm)
{
@@ -1381,6 +1620,12 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
struct xe_tile *tile;
u8 id;
+ /*
+ * Since the GSCCS is not user-accessible, we don't expect a GSC VM to
+ * ever be in faulting mode.
+ */
+ xe_assert(xe, !((flags & XE_VM_FLAG_GSC) && (flags & XE_VM_FLAG_FAULT_MODE)));
+
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return ERR_PTR(-ENOMEM);
@@ -1391,7 +1636,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->flags = flags;
- init_rwsem(&vm->lock);
+ /**
+ * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
+ * manipulated under the PXP mutex. However, the PXP mutex can be taken
+ * under a user-VM lock when the PXP session is started at exec_queue
+ * creation time. Those are different VMs and therefore there is no risk
+ * of deadlock, but we need to tell lockdep that this is the case or it
+ * will print a warning.
+ */
+ if (flags & XE_VM_FLAG_GSC) {
+ static struct lock_class_key gsc_vm_key;
+
+ __init_rwsem(&vm->lock, "gsc_vm", &gsc_vm_key);
+ } else {
+ init_rwsem(&vm->lock);
+ }
mutex_init(&vm->snap_mutex);
INIT_LIST_HEAD(&vm->rebind_list);
@@ -1419,13 +1678,21 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
* scheduler drops all the references of it, hence protecting the VM
* for this case is necessary.
*/
- if (flags & XE_VM_FLAG_LR_MODE)
+ if (flags & XE_VM_FLAG_LR_MODE) {
+ INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
xe_pm_runtime_get_noresume(xe);
+ }
+
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_no_resv;
+ }
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
err = -ENOMEM;
- goto err_no_resv;
+ goto err_svm_fini;
}
drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm,
@@ -1465,10 +1732,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
vm->batch_invalidate_tlb = true;
}
- if (vm->flags & XE_VM_FLAG_LR_MODE) {
- INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ if (vm->flags & XE_VM_FLAG_LR_MODE)
vm->batch_invalidate_tlb = false;
- }
/* Fill pt_root after allocating scratch tables */
for_each_tile(tile, xe, id) {
@@ -1511,6 +1776,11 @@ err_close:
xe_vm_close_and_put(vm);
return ERR_PTR(err);
+err_svm_fini:
+ if (flags & XE_VM_FLAG_FAULT_MODE) {
+ vm->size = 0; /* close the vm */
+ xe_svm_fini(vm);
+ }
err_no_resv:
mutex_destroy(&vm->snap_mutex);
for_each_tile(tile, xe, id)
@@ -1524,9 +1794,44 @@ err_no_resv:
static void xe_vm_close(struct xe_vm *vm)
{
+ struct xe_device *xe = vm->xe;
+ bool bound;
+ int idx;
+
+ bound = drm_dev_enter(&xe->drm, &idx);
+
down_write(&vm->lock);
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_notifier_lock(vm);
+
vm->size = 0;
+
+ if (!((vm->flags & XE_VM_FLAG_MIGRATION))) {
+ struct xe_tile *tile;
+ struct xe_gt *gt;
+ u8 id;
+
+ /* Wait for pending binds */
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ if (bound) {
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+ xe_pt_clear(xe, vm->pt_root[id]);
+
+ for_each_gt(gt, xe, id)
+ xe_gt_tlb_invalidation_vm(gt, vm);
+ }
+ }
+
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_notifier_unlock(vm);
up_write(&vm->lock);
+
+ if (bound)
+ drm_dev_exit(idx);
}
void xe_vm_close_and_put(struct xe_vm *vm)
@@ -1543,6 +1848,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vm_close(vm);
if (xe_vm_in_preempt_fence_mode(vm))
flush_work(&vm->preempt.rebind_work);
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_close(vm);
down_write(&vm->lock);
for_each_tile(tile, xe, id) {
@@ -1611,6 +1918,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma);
}
+ if (xe_vm_in_fault_mode(vm))
+ xe_svm_fini(vm);
+
up_write(&vm->lock);
down_write(&xe->usm.lock);
@@ -1744,16 +2054,14 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE &&
- args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
+ args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
+ !xe->info.needs_scratch))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) &&
args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE))
return -EINVAL;
- if (XE_IOCTL_DBG(xe, args->extensions))
- return -EINVAL;
-
if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE)
flags |= XE_VM_FLAG_SCRATCH_PAGE;
if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE)
@@ -1899,6 +2207,20 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
}
#endif
+static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
+{
+ if (!xe_vm_in_fault_mode(vm))
+ return false;
+
+ if (!xe_vm_has_scratch(vm))
+ return false;
+
+ if (bind_flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE)
+ return false;
+
+ return true;
+}
+
/*
 * Create operations list from IOCTL arguments, set up operations fields so parse
* and commit steps are decoupled from IOCTL arguments. This step can fail.
@@ -1967,8 +2289,12 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.read_only =
flags & DRM_XE_VM_BIND_FLAG_READONLY;
op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ op->map.is_cpu_addr_mirror = flags &
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
op->map.pat_index = pat_index;
+ op->map.invalidate_on_bind =
+ __xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
op->prefetch.region = prefetch_region;
}
@@ -1978,6 +2304,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
return ops;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
u16 pat_index, unsigned int flags)
@@ -2105,7 +2432,7 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
}
}
- /* Adjust for partial unbind after removin VMA from VM */
+ /* Adjust for partial unbind after removing VMA from VM */
if (!err) {
op->base.remap.unmap->va->va.addr = op->remap.start;
op->base.remap.unmap->va->va.range = op->remap.range;
@@ -2158,6 +2485,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
VMA_CREATE_FLAG_IS_NULL : 0;
flags |= op->map.dumpable ?
VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= op->map.is_cpu_addr_mirror ?
+ VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
vma = new_vma(vm, &op->base.map, op->map.pat_index,
flags);
@@ -2165,7 +2494,9 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->map.vma = vma;
- if (op->map.immediate || !xe_vm_in_fault_mode(vm))
+ if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
+ !op->map.is_cpu_addr_mirror) ||
+ op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask);
break;
@@ -2174,21 +2505,35 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
{
struct xe_vma *old =
gpuva_to_vma(op->base.remap.unmap->va);
+ bool skip = xe_vma_is_cpu_addr_mirror(old);
+ u64 start = xe_vma_start(old), end = xe_vma_end(old);
+
+ if (op->base.remap.prev)
+ start = op->base.remap.prev->va.addr +
+ op->base.remap.prev->va.range;
+ if (op->base.remap.next)
+ end = op->base.remap.next->va.addr;
+
+ if (xe_vma_is_cpu_addr_mirror(old) &&
+ xe_svm_has_mapping(vm, start, end))
+ return -EBUSY;
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- if (op->base.remap.prev) {
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_READ_ONLY ?
+ VMA_CREATE_FLAG_READ_ONLY : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ DRM_GPUVA_SPARSE ?
+ VMA_CREATE_FLAG_IS_NULL : 0;
+ flags |= op->base.remap.unmap->va->flags &
+ XE_VMA_DUMPABLE ?
+ VMA_CREATE_FLAG_DUMPABLE : 0;
+ flags |= xe_vma_is_cpu_addr_mirror(old) ?
+ VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
+ if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
old->pat_index, flags);
if (IS_ERR(vma))
@@ -2200,9 +2545,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
* Userptr creates a new SG mapping so
* we must also rebind.
*/
- op->remap.skip_prev = !xe_vma_is_userptr(old) &&
+ op->remap.skip_prev = skip ||
+ (!xe_vma_is_userptr(old) &&
IS_ALIGNED(xe_vma_end(vma),
- xe_vma_max_pte_size(old));
+ xe_vma_max_pte_size(old)));
if (op->remap.skip_prev) {
xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
op->remap.range -=
@@ -2218,16 +2564,6 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
}
if (op->base.remap.next) {
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
-
vma = new_vma(vm, op->base.remap.next,
old->pat_index, flags);
if (IS_ERR(vma))
@@ -2239,9 +2575,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
* Userptr creates a new SG mapping so
* we must also rebind.
*/
- op->remap.skip_next = !xe_vma_is_userptr(old) &&
+ op->remap.skip_next = skip ||
+ (!xe_vma_is_userptr(old) &&
IS_ALIGNED(xe_vma_start(vma),
- xe_vma_max_pte_size(old));
+ xe_vma_max_pte_size(old)));
if (op->remap.skip_next) {
xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old));
op->remap.range -=
@@ -2254,13 +2591,32 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
}
}
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (!skip)
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
}
case DRM_GPUVA_OP_UNMAP:
+ vma = gpuva_to_vma(op->base.unmap.va);
+
+ if (xe_vma_is_cpu_addr_mirror(vma) &&
+ xe_svm_has_mapping(vm, xe_vma_start(vma),
+ xe_vma_end(vma)))
+ return -EBUSY;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ break;
case DRM_GPUVA_OP_PREFETCH:
- /* FIXME: Need to skip some prefetch ops */
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (err)
+ return err;
+ }
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -2357,13 +2713,15 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
bool validate)
{
struct xe_bo *bo = xe_vma_bo(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
int err = 0;
if (bo) {
if (!bo->vm)
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
- err = xe_bo_validate(bo, xe_vma_vm(vma), true);
+ err = xe_bo_validate(bo, vm,
+ !xe_vm_in_preempt_fence_mode(vm));
}
return err;
@@ -2391,9 +2749,10 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
- err = vma_lock_and_validate(exec, op->map.vma,
- !xe_vm_in_fault_mode(vm) ||
- op->map.immediate);
+ if (!op->map.invalidate_on_bind)
+ err = vma_lock_and_validate(exec, op->map.vma,
+ !xe_vm_in_fault_mode(vm) ||
+ op->map.immediate);
break;
case DRM_GPUVA_OP_REMAP:
err = check_ufence(gpuva_to_vma(op->base.remap.unmap->va));
@@ -2484,6 +2843,8 @@ static void op_trace(struct xe_vma_op *op)
case DRM_GPUVA_OP_PREFETCH:
trace_xe_vma_bind(gpuva_to_vma(op->base.prefetch.va));
break;
+ case DRM_GPUVA_OP_DRIVER:
+ break;
default:
XE_WARN_ON("NOT POSSIBLE");
}
@@ -2661,14 +3022,15 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
}
if (ufence)
xe_sync_ufence_put(ufence);
- for (i = 0; i < vops->num_syncs; i++)
- xe_sync_entry_signal(vops->syncs + i, fence);
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
- dma_fence_put(fence);
+ if (fence) {
+ for (i = 0; i < vops->num_syncs; i++)
+ xe_sync_entry_signal(vops->syncs + i, fence);
+ xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
+ }
}
-static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
- struct xe_vma_ops *vops)
+static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
+ struct xe_vma_ops *vops)
{
struct drm_exec exec;
struct dma_fence *fence;
@@ -2681,12 +3043,15 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
drm_exec_until_all_locked(&exec) {
err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
drm_exec_retry_on_contention(&exec);
- if (err)
+ if (err) {
+ fence = ERR_PTR(err);
goto unlock;
+ }
fence = ops_execute(vm, vops);
if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
+ if (PTR_ERR(fence) == -ENODATA)
+ vm_bind_ioctl_ops_fini(vm, vops, NULL);
goto unlock;
}
@@ -2695,14 +3060,17 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
unlock:
drm_exec_fini(&exec);
- return err;
+ return fence;
}
+ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
#define SUPPORTED_FLAGS_STUB \
(DRM_XE_VM_BIND_FLAG_READONLY | \
DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
DRM_XE_VM_BIND_FLAG_NULL | \
- DRM_XE_VM_BIND_FLAG_DUMPABLE)
+ DRM_XE_VM_BIND_FLAG_DUMPABLE | \
+ DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -2713,7 +3081,7 @@ unlock:
#define XE_64K_PAGE_MASK 0xffffull
#define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
-static int vm_bind_ioctl_check_args(struct xe_device *xe,
+static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
struct drm_xe_vm_bind *args,
struct drm_xe_vm_bind_op **bind_ops)
{
@@ -2733,13 +3101,14 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
*bind_ops = kvmalloc_array(args->num_binds,
sizeof(struct drm_xe_vm_bind_op),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!*bind_ops)
return args->num_binds > 1 ? -ENOBUFS : -ENOMEM;
- err = __copy_from_user(*bind_ops, bind_user,
- sizeof(struct drm_xe_vm_bind_op) *
- args->num_binds);
+ err = copy_from_user(*bind_ops, bind_user,
+ sizeof(struct drm_xe_vm_bind_op) *
+ args->num_binds);
if (XE_IOCTL_DBG(xe, err)) {
err = -EFAULT;
goto free_bind_ops;
@@ -2757,9 +3126,18 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
u64 obj_offset = (*bind_ops)[i].obj_offset;
u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance;
bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
+ bool is_cpu_addr_mirror = flags &
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
u16 pat_index = (*bind_ops)[i].pat_index;
u16 coh_mode;
+ if (XE_IOCTL_DBG(xe, is_cpu_addr_mirror &&
+ (!xe_vm_in_fault_mode(vm) ||
+ !IS_ENABLED(CONFIG_DRM_XE_GPUSVM)))) {
+ err = -EINVAL;
+ goto free_bind_ops;
+ }
+
if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) {
err = -EINVAL;
goto free_bind_ops;
@@ -2780,13 +3158,14 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, op > DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
- XE_IOCTL_DBG(xe, obj && is_null) ||
- XE_IOCTL_DBG(xe, obj_offset && is_null) ||
+ XE_IOCTL_DBG(xe, obj && (is_null || is_cpu_addr_mirror)) ||
+ XE_IOCTL_DBG(xe, obj_offset && (is_null ||
+ is_cpu_addr_mirror)) ||
XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP &&
- is_null) ||
+ (is_null || is_cpu_addr_mirror)) ||
XE_IOCTL_DBG(xe, !obj &&
op == DRM_XE_VM_BIND_OP_MAP &&
- !is_null) ||
+ !is_null && !is_cpu_addr_mirror) ||
XE_IOCTL_DBG(xe, !obj &&
op == DRM_XE_VM_BIND_OP_UNMAP_ALL) ||
XE_IOCTL_DBG(xe, addr &&
@@ -2864,7 +3243,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
u64 addr, u64 range, u64 obj_offset,
- u16 pat_index)
+ u16 pat_index, u32 op, u32 bind_flags)
{
u16 coh_mode;
@@ -2888,7 +3267,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
- return -EINVAL;
+ return -EINVAL;
}
}
@@ -2896,7 +3275,7 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
if (bo->cpu_caching) {
if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
- return -EINVAL;
+ return -EINVAL;
}
} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
/*
@@ -2905,9 +3284,15 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
 * how it was mapped on the CPU. Just assume it is
* potentially cached on CPU side.
*/
- return -EINVAL;
+ return -EINVAL;
}
+ /* If a BO is protected it can only be mapped if the key is still valid */
+ if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) &&
+ op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL)
+ if (XE_IOCTL_DBG(xe, xe_pxp_bo_key_check(xe->pxp, bo) != 0))
+ return -ENOEXEC;
+
return 0;
}
@@ -2925,18 +3310,23 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct xe_sync_entry *syncs = NULL;
struct drm_xe_vm_bind_op *bind_ops;
struct xe_vma_ops vops;
+ struct dma_fence *fence;
int err;
int i;
- err = vm_bind_ioctl_check_args(xe, args, &bind_ops);
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ err = vm_bind_ioctl_check_args(xe, vm, args, &bind_ops);
if (err)
- return err;
+ goto put_vm;
if (args->exec_queue_id) {
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q)) {
err = -ENOENT;
- goto free_objs;
+ goto put_vm;
}
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
@@ -2945,15 +3335,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
}
- vm = xe_vm_lookup(xef, args->vm_id);
- if (XE_IOCTL_DBG(xe, !vm)) {
- err = -EINVAL;
- goto put_exec_queue;
- }
+ /* Ensure all UNMAPs are visible */
+ xe_svm_flush(vm);
err = down_write_killable(&vm->lock);
if (err)
- goto put_vm;
+ goto put_exec_queue;
if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
err = -ENOENT;
@@ -2973,14 +3360,16 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (args->num_binds) {
bos = kvcalloc(args->num_binds, sizeof(*bos),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!bos) {
err = -ENOMEM;
goto release_vm_lock;
}
ops = kvcalloc(args->num_binds, sizeof(*ops),
- GFP_KERNEL | __GFP_ACCOUNT);
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!ops) {
err = -ENOMEM;
goto release_vm_lock;
@@ -2994,6 +3383,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 obj = bind_ops[i].obj;
u64 obj_offset = bind_ops[i].obj_offset;
u16 pat_index = bind_ops[i].pat_index;
+ u32 op = bind_ops[i].op;
+ u32 bind_flags = bind_ops[i].flags;
if (!obj)
continue;
@@ -3006,7 +3397,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
bos[i] = gem_to_xe_bo(gem_obj);
err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
- obj_offset, pat_index);
+ obj_offset, pat_index, op,
+ bind_flags);
if (err)
goto put_obj;
}
@@ -3087,7 +3479,11 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (err)
goto unwind_ops;
- err = vm_bind_ioctl_ops_execute(vm, &vops);
+ fence = vm_bind_ioctl_ops_execute(vm, &vops);
+ if (IS_ERR(fence))
+ err = PTR_ERR(fence);
+ else
+ dma_fence_put(fence);
unwind_ops:
if (err && err != -ENODATA)
@@ -3108,12 +3504,11 @@ put_obj:
xe_bo_put(bos[i]);
release_vm_lock:
up_write(&vm->lock);
-put_vm:
- xe_vm_put(vm);
put_exec_queue:
if (q)
xe_exec_queue_put(q);
-free_objs:
+put_vm:
+ xe_vm_put(vm);
kvfree(bos);
kvfree(ops);
if (args->num_binds > 1)
@@ -3122,6 +3517,81 @@ free_objs:
}
/**
+ * xe_vm_bind_kernel_bo - bind a kernel BO to a VM
+ * @vm: VM to bind the BO to
+ * @bo: BO to bind
+ * @q: exec queue to use for the bind (optional)
+ * @addr: address at which to bind the BO
+ * @cache_lvl: PAT cache level to use
+ *
+ * Execute a VM bind map operation on a kernel-owned BO to bind it into a
+ * kernel-owned VM.
+ *
+ * Return: a dma_fence to track the binding completion if the job to do so was
+ * successfully submitted, an error pointer otherwise.
+ */
+struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
+ struct xe_exec_queue *q, u64 addr,
+ enum xe_cache_level cache_lvl)
+{
+ struct xe_vma_ops vops;
+ struct drm_gpuva_ops *ops = NULL;
+ struct dma_fence *fence;
+ int err;
+
+ xe_bo_get(bo);
+ xe_vm_get(vm);
+ if (q)
+ xe_exec_queue_get(q);
+
+ down_write(&vm->lock);
+
+ xe_vma_ops_init(&vops, vm, q, NULL, 0);
+
+ ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
+ DRM_XE_VM_BIND_OP_MAP, 0, 0,
+ vm->xe->pat.idx[cache_lvl]);
+ if (IS_ERR(ops)) {
+ err = PTR_ERR(ops);
+ goto release_vm_lock;
+ }
+
+ err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+ if (err)
+ goto release_vm_lock;
+
+ xe_assert(vm->xe, !list_empty(&vops.list));
+
+ err = xe_vma_ops_alloc(&vops, false);
+ if (err)
+ goto unwind_ops;
+
+ fence = vm_bind_ioctl_ops_execute(vm, &vops);
+ if (IS_ERR(fence))
+ err = PTR_ERR(fence);
+
+unwind_ops:
+ if (err && err != -ENODATA)
+ vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+
+ xe_vma_ops_fini(&vops);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+
+release_vm_lock:
+ up_write(&vm->lock);
+
+ if (q)
+ xe_exec_queue_put(q);
+ xe_vm_put(vm);
+ xe_bo_put(bo);
+
+ if (err)
+ fence = ERR_PTR(err);
+
+ return fence;
+}
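For illustration only, a kernel-internal user (the PXP code is the intended consumer, but the exact call site is not part of this hunk) might map a BO into a kernel-owned VM and wait for the bind to land roughly as follows; the address and cache level are placeholders:

	/* Illustrative sketch: bind a kernel BO and wait for completion. */
	static int bind_kernel_bo_example(struct xe_vm *vm, struct xe_bo *bo,
					  struct xe_exec_queue *q, u64 addr)
	{
		struct dma_fence *fence;

		fence = xe_vm_bind_kernel_bo(vm, bo, q, addr, XE_CACHE_WB);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		dma_fence_wait(fence, false);
		dma_fence_put(fence);

		return 0;
	}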
+
+/**
* xe_vm_lock() - Lock the vm's dma_resv object
* @vm: The struct xe_vm whose lock is to be locked
* @intr: Whether to perform any wait interruptible
@@ -3170,6 +3640,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
int ret = 0;
xe_assert(xe, !xe_vma_is_null(vma));
+ xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
trace_xe_vma_invalidate(vma);
vm_dbg(&xe_vma_vm(vma)->xe->drm,
@@ -3227,6 +3698,35 @@ wait:
return ret;
}
+int xe_vm_validate_protected(struct xe_vm *vm)
+{
+ struct drm_gpuva *gpuva;
+ int err = 0;
+
+ if (!vm)
+ return -ENODEV;
+
+ mutex_lock(&vm->snap_mutex);
+
+ drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+ struct xe_bo *bo = vma->gpuva.gem.obj ?
+ gem_to_xe_bo(vma->gpuva.gem.obj) : NULL;
+
+ if (!bo)
+ continue;
+
+ if (xe_bo_is_protected(bo)) {
+ err = xe_pxp_bo_key_check(vm->xe->pxp, bo);
+ if (err)
+ break;
+ }
+ }
+
+ mutex_unlock(&vm->snap_mutex);
+ return err;
+}
+
struct xe_vm_snapshot {
unsigned long num_snaps;
struct {
@@ -3303,7 +3803,6 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
for (int i = 0; i < snap->num_snaps; i++) {
struct xe_bo *bo = snap->snap[i].bo;
- struct iosys_map src;
int err;
if (IS_ERR(snap->snap[i].data))
@@ -3316,16 +3815,8 @@ void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap)
}
if (bo) {
- xe_bo_lock(bo, false);
- err = ttm_bo_vmap(&bo->ttm, &src);
- if (!err) {
- xe_map_memcpy_from(xe_bo_device(bo),
- snap->snap[i].data,
- &src, snap->snap[i].bo_ofs,
- snap->snap[i].len);
- ttm_bo_vunmap(&bo->ttm, &src);
- }
- xe_bo_unlock(bo);
+ err = xe_bo_read(bo, snap->snap[i].bo_ofs,
+ snap->snap[i].data, snap->snap[i].len);
} else {
void __user *userptr = (void __user *)(size_t)snap->snap[i].bo_ofs;
@@ -3379,6 +3870,9 @@ void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p)
}
drm_puts(p, "\n");
+
+ if (drm_coredump_printer_is_full(p))
+ return;
}
}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index c864dba35e1d..494af6bdc646 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -17,11 +17,13 @@ struct drm_printer;
struct drm_file;
struct ttm_buffer_object;
-struct ttm_validate_buffer;
+
+struct dma_fence;
struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
+struct xe_svm_range;
struct drm_exec;
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
@@ -151,6 +153,11 @@ static inline bool xe_vma_is_null(struct xe_vma *vma)
return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}
+static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
+{
+ return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
+}
+
static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
return !xe_vma_bo(vma);
@@ -158,7 +165,8 @@ static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
- return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
+ return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
+ !xe_vma_is_cpu_addr_mirror(vma);
}
/**
@@ -211,9 +219,17 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm);
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
u8 tile_mask);
+struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
+ struct xe_vma *vma,
+ struct xe_svm_range *range,
+ u8 tile_mask);
+struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
+ struct xe_svm_range *range);
int xe_vm_invalidate_vma(struct xe_vma *vma);
+int xe_vm_validate_protected(struct xe_vm *vm);
+
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
@@ -248,6 +264,10 @@ int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
+ struct xe_exec_queue *q, u64 addr,
+ enum xe_cache_level cache_lvl);
+
/**
* xe_vm_resv() - Return's the vm's reservation object
* @vm: The vm
@@ -275,9 +295,86 @@ static inline void vm_dbg(const struct drm_device *dev,
const char *format, ...)
{ /* noop */ }
#endif
-#endif
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+/**
+ * xe_vm_set_validating() - Register this task as currently making bos resident
+ * @allow_res_evict: Allow eviction of buffer objects bound to @vm when
+ * validating.
+ * @vm: Pointer to the vm or NULL.
+ *
+ * Register this task as currently making bos resident for the vm. Intended
+ * to avoid eviction by the same task of shared bos bound to the vm.
+ * Call with the vm's resv lock held.
+ *
+ * Return: A pin cookie that should be used for xe_vm_clear_validating().
+ */
+static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm,
+ bool allow_res_evict)
+{
+ struct pin_cookie cookie = {};
+
+ if (vm && !allow_res_evict) {
+ xe_vm_assert_held(vm);
+ cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, current);
+ }
+
+ return cookie;
+}
+
+/**
+ * xe_vm_clear_validating() - Unregister this task as currently making bos resident
+ * @vm: Pointer to the vm or NULL
+ * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same
+ * value as for xe_vm_set_validating().
+ * @cookie: Cookie obtained from xe_vm_set_validating().
+ *
+ * Unregister this task as currently making bos resident for the vm. Intended
+ * to avoid eviction by the same task of shared bos bound to the vm.
+ * Call with the vm's resv lock held.
+ */
+static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict,
+ struct pin_cookie cookie)
+{
+ if (vm && !allow_res_evict) {
+ lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie);
+ /* Pairs with READ_ONCE in xe_vm_is_validating() */
+ WRITE_ONCE(vm->validating, NULL);
+ }
+}
+
+/**
+ * xe_vm_is_validating() - Whether bos bound to the vm are currently being made resident
+ * by the current task.
+ * @vm: Pointer to the vm.
+ *
+ * If this function returns %true, we should be in a vm resv locked region, since
+ * the current process is the same task that called xe_vm_set_validating().
+ * The function asserts that that's indeed the case.
+ *
+ * Return: %true if the task is currently making bos resident, %false otherwise.
+ */
+static inline bool xe_vm_is_validating(struct xe_vm *vm)
+{
+ /* Pairs with WRITE_ONCE in xe_vm_set_validating() and xe_vm_clear_validating() */
+ if (READ_ONCE(vm->validating) == current) {
+ xe_vm_assert_held(vm);
+ return true;
+ }
+ return false;
+}
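Putting the three helpers together, the intended pairing is roughly the sketch below (illustrative only, with the actual validation step elided); the TTM eviction_valuable() callback can then consult xe_vm_is_validating() to refuse evicting bos bound to the same vm:

	/* Illustrative sketch: bracket a validation pass with the marker. */
	static void validating_example(struct xe_vm *vm, bool allow_res_evict)
	{
		struct pin_cookie cookie;

		xe_vm_assert_held(vm);

		cookie = xe_vm_set_validating(vm, allow_res_evict);
		/* ... make bos resident here, e.g. via ttm_bo_validate() ... */
		xe_vm_clear_validating(vm, allow_res_evict, cookie);
	}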
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 4d33f310b653..1030ce214032 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -64,8 +64,8 @@
* update page level 2 PDE[1] to page level 3b phys address (GPU)
*
* bind BO2 0x1ff000-0x201000
- * update page level 3a PTE[511] to BO2 phys addres (GPU)
- * update page level 3b PTE[0] to BO2 phys addres + 0x1000 (GPU)
+ * update page level 3a PTE[511] to BO2 phys address (GPU)
+ * update page level 3b PTE[0] to BO2 phys address + 0x1000 (GPU)
*
* GPU bypass
* ~~~~~~~~~~
@@ -192,7 +192,7 @@
*
* If a VM is in fault mode (TODO: link to fault mode), new bind operations that
* create mappings are by default deferred to the page fault handler (first
- * use). This behavior can be overriden by setting the flag
+ * use). This behavior can be overridden by setting the flag
 * DRM_XE_VM_BIND_FLAG_IMMEDIATE, which indicates that the mapping should be
 * created immediately.
*
@@ -209,7 +209,7 @@
*
 * Since this is core kernel managed memory, the kernel can move this memory
* whenever it wants. We register an invalidation MMU notifier to alert XE when
- * a user poiter is about to move. The invalidation notifier needs to block
+ * a user pointer is about to move. The invalidation notifier needs to block
* until all pending users (jobs or compute mode engines) of the userptr are
 * idle to ensure no faults. This is done by waiting on all of the VM's dma-resv slots.
*
@@ -252,7 +252,7 @@
* Rebind worker
* -------------
*
- * The rebind worker is very similar to an exec. It is resposible for rebinding
+ * The rebind worker is very similar to an exec. It is responsible for rebinding
* evicted BOs or userptrs, waiting on those operations, installing new preempt
* fences, and finally resuming executing of engines in the VM.
*
@@ -317,11 +317,11 @@
* are not allowed, only long running workloads and ULLS are enabled on a faulting
* VM.
*
- * Defered VM binds
+ * Deferred VM binds
 * -----------------
*
* By default, on a faulting VM binds just allocate the VMA and the actual
- * updating of the page tables is defered to the page fault handler. This
+ * updating of the page tables is deferred to the page fault handler. This
* behavior can be overridden by setting the flag DRM_XE_VM_BIND_FLAG_IMMEDIATE in
* the VM bind which will then do the bind immediately.
*
@@ -431,7 +431,7 @@
* bind path also acquires this lock in write while the exec / compute mode
* rebind worker acquires this lock in read mode.
*
- * VM dma-resv lock (vm->ttm.base.resv->lock) - WW lock. Protects VM dma-resv
+ * VM dma-resv lock (vm->gpuvm.r_obj->resv->lock) - WW lock. Protects VM dma-resv
* slots which is shared with any private BO in the VM. Expected to be acquired
* during VM binds, execs, and compute mode rebind worker. This lock is also
* held when private BOs are being evicted.
@@ -500,18 +500,18 @@
* Slot waiting
* ------------
*
- * 1. The exection of all jobs from kernel ops shall wait on all slots
+ * 1. The execution of all jobs from kernel ops shall wait on all slots
* (DMA_RESV_USAGE_PREEMPT_FENCE) of either an external BO or VM (depends on if
* kernel op is operating on external or private BO)
*
- * 2. In non-compute mode, the exection of all jobs from rebinds in execs shall
+ * 2. In non-compute mode, the execution of all jobs from rebinds in execs shall
* wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO or VM
 * (depends on if the rebind is operating on an external or private BO)
*
- * 3. In non-compute mode, the exection of all jobs from execs shall wait on the
+ * 3. In non-compute mode, the execution of all jobs from execs shall wait on the
* last rebind job
*
- * 4. In compute mode, the exection of all jobs from rebinds in the rebind
+ * 4. In compute mode, the execution of all jobs from rebinds in the rebind
* worker shall wait on the DMA_RESV_USAGE_KERNEL slot of either an external BO
* or VM (depends on if rebind is operating on external or private BO)
*
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7f9a303e51d8..1979e9bdbdf3 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -6,6 +6,7 @@
#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_
+#include <drm/drm_gpusvm.h>
#include <drm/drm_gpuvm.h>
#include <linux/dma-resv.h>
@@ -18,6 +19,7 @@
#include "xe_range_fence.h"
struct xe_bo;
+struct xe_svm_range;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
@@ -42,6 +44,7 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -59,12 +62,16 @@ struct xe_userptr {
struct sg_table *sg;
/** @notifier_seq: notifier sequence number */
unsigned long notifier_seq;
+ /** @unmap_mutex: Mutex protecting dma-unmapping */
+ struct mutex unmap_mutex;
/**
* @initial_bind: user pointer has been bound at least once.
* write: vm->userptr.notifier_lock in read mode and vm->resv held.
* read: vm->userptr.notifier_lock in write mode or vm->resv held.
*/
bool initial_bind;
+ /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
+ bool mapped;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
u32 divisor;
#endif
@@ -139,6 +146,30 @@ struct xe_vm {
/** @gpuvm: base GPUVM used to track VMAs */
struct drm_gpuvm gpuvm;
+ /** @svm: Shared virtual memory state */
+ struct {
+ /** @svm.gpusvm: base GPUSVM used to track fault allocations */
+ struct drm_gpusvm gpusvm;
+ /**
+ * @svm.garbage_collector: Garbage collector which is used to unmap
+ * SVM ranges' GPU bindings and destroy the ranges.
+ */
+ struct {
+ /** @svm.garbage_collector.lock: Protects the range list */
+ spinlock_t lock;
+ /**
+ * @svm.garbage_collector.range_list: List of SVM ranges
+ * in the garbage collector.
+ */
+ struct list_head range_list;
+ /**
+ * @svm.garbage_collector.work: Worker which the
+ * garbage collector runs on.
+ */
+ struct work_struct work;
+ } garbage_collector;
+ } svm;
+
struct xe_device *xe;
/* exec queue used for (un)binding vma's */
@@ -164,6 +195,7 @@ struct xe_vm {
#define XE_VM_FLAG_BANNED BIT(5)
#define XE_VM_FLAG_TILE_ID(flags) FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile) FIELD_PREP(GENMASK(7, 6), (tile)->id)
+#define XE_VM_FLAG_GSC BIT(8)
unsigned long flags;
/** @composite_fence_ctx: context composite fence */
@@ -227,8 +259,8 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires the @userptr.notifer_lock in
- * write mode.
+ * items to the list requires either the @userptr.notifier_lock in
+ * write mode, OR @lock in write mode.
*/
struct list_head invalidated;
} userptr;
@@ -278,6 +310,14 @@ struct xe_vm {
* protected by the vm resv.
*/
u64 tlb_flush_seqno;
+ /**
+ * @validating: The task that is currently making bos resident for this vm.
+ * Protected by the VM's resv for writing. Opportunistic reading can be done
+ * using READ_ONCE. Note: This is a workaround for the
+ * TTM eviction_valuable() callback not being passed a struct
+ * ttm_operation_ctx. Future work might want to address this.
+ */
+ struct task_struct *validating;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
/** @xef: XE file handle for tracking this VM's drm client */
@@ -294,8 +334,12 @@ struct xe_vma_op_map {
bool read_only;
/** @is_null: is NULL binding */
bool is_null;
+ /** @is_cpu_addr_mirror: is CPU address mirror binding */
+ bool is_cpu_addr_mirror;
/** @dumpable: whether BO is dumped on GPU hang */
bool dumpable;
+ /** @invalidate_on_bind: invalidate the VMA before bind */
+ bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
};
@@ -324,6 +368,20 @@ struct xe_vma_op_prefetch {
u32 region;
};
+/** struct xe_vma_op_map_range - VMA map range operation */
+struct xe_vma_op_map_range {
+ /** @vma: VMA to map (system allocator VMA) */
+ struct xe_vma *vma;
+ /** @range: SVM range to map */
+ struct xe_svm_range *range;
+};
+
+/** struct xe_vma_op_unmap_range - VMA unmap range operation */
+struct xe_vma_op_unmap_range {
+ /** @range: SVM range to unmap */
+ struct xe_svm_range *range;
+};
+
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
@@ -334,6 +392,14 @@ enum xe_vma_op_flags {
XE_VMA_OP_NEXT_COMMITTED = BIT(2),
};
+/** enum xe_vma_subop - VMA sub-operation */
+enum xe_vma_subop {
+ /** @XE_VMA_SUBOP_MAP_RANGE: Map range */
+ XE_VMA_SUBOP_MAP_RANGE,
+ /** @XE_VMA_SUBOP_UNMAP_RANGE: Unmap range */
+ XE_VMA_SUBOP_UNMAP_RANGE,
+};
+
/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
/** @base: GPUVA base operation */
@@ -342,6 +408,8 @@ struct xe_vma_op {
struct list_head link;
/** @flags: operation flags */
enum xe_vma_op_flags flags;
+ /** @subop: user defined sub-operation */
+ enum xe_vma_subop subop;
/** @tile_mask: Tile mask for operation */
u8 tile_mask;
@@ -352,6 +420,10 @@ struct xe_vma_op {
struct xe_vma_op_remap remap;
/** @prefetch: VMA prefetch operation specific data */
struct xe_vma_op_prefetch prefetch;
+ /** @map_range: VMA map range operation specific data */
+ struct xe_vma_op_map_range map_range;
+ /** @unmap_range: VMA unmap range operation specific data */
+ struct xe_vma_op_unmap_range unmap_range;
};
};
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 80ba2fc78837..e421a74fb87c 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -49,7 +49,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
*/
static void resize_vram_bar(struct xe_device *xe)
{
- u64 force_vram_bar_size = xe_modparam.force_vram_bar_size;
+ int force_vram_bar_size = xe_modparam.force_vram_bar_size;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
struct pci_bus *root = pdev->bus;
resource_size_t current_size;
@@ -66,6 +66,9 @@ static void resize_vram_bar(struct xe_device *xe)
if (!bar_size_mask)
return;
+ if (force_vram_bar_size < 0)
+ return;
+
/* set to a specific size? */
if (force_vram_bar_size) {
u32 bar_size_bit;
@@ -169,7 +172,7 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
u64 offset_hi, offset_lo;
u32 nodes, num_enabled;
- reg = xe_mmio_read32(gt, MIRROR_FUSE3);
+ reg = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
nodes = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, reg);
num_enabled = hweight32(nodes); /* Number of enabled l3 nodes */
@@ -185,7 +188,8 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
offset = round_up(offset, SZ_128K); /* SW must round up to nearest 128K */
/* We don't expect any holes */
- xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(gt, GSMBASE) - ccs_size),
+ xe_assert_msg(xe, offset == (xe_mmio_read64_2x32(&gt_to_tile(gt)->mmio, GSMBASE) -
+ ccs_size),
"Hole between CCS and GSM.\n");
} else {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
@@ -219,8 +223,8 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_gt *gt = tile->primary_gt;
+ unsigned int fw_ref;
u64 offset;
- int err;
u32 reg;
if (IS_SRIOV_VF(xe)) {
@@ -239,9 +243,9 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
return 0;
}
- err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (err)
- return err;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
/* actual size */
if (unlikely(xe->info.platform == XE_DG1)) {
@@ -257,13 +261,15 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
if (xe->info.has_flat_ccs) {
offset = get_flat_ccs_offset(gt, *tile_size);
} else {
- offset = xe_mmio_read64_2x32(gt, GSMBASE);
+ offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE);
}
/* remove the tile offset so we have just the available size */
*vram_size = offset - *tile_offset;
- return xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return 0;
}
static void vram_fini(void *arg)
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
new file mode 100644
index 000000000000..b378848d3b7b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright © 2024 Intel Corporation */
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/errno.h>
+#include <linux/intel_vsec.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_drv.h"
+#include "xe_mmio.h"
+#include "xe_platform_types.h"
+#include "xe_pm.h"
+#include "xe_vsec.h"
+
+#include "regs/xe_pmt.h"
+
+/* PMT GUID value for BMG devices. NOTE: this is NOT a PCI id */
+#define BMG_DEVICE_ID 0xE2F8
+
+static struct intel_vsec_header bmg_telemetry = {
+ .length = 0x10,
+ .id = VSEC_ID_TELEMETRY,
+ .num_entries = 2,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET,
+};
+
+static struct intel_vsec_header bmg_punit_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x60,
+};
+
+static struct intel_vsec_header bmg_oobmsm_crashlog = {
+ .length = 0x10,
+ .id = VSEC_ID_CRASHLOG,
+ .num_entries = 1,
+ .entry_size = 4,
+ .tbir = 0,
+ .offset = BMG_DISCOVERY_OFFSET + 0x78,
+};
+
+static struct intel_vsec_header *bmg_capabilities[] = {
+ &bmg_telemetry,
+ &bmg_punit_crashlog,
+ &bmg_oobmsm_crashlog,
+ NULL
+};
+
+enum xe_vsec {
+ XE_VSEC_UNKNOWN = 0,
+ XE_VSEC_BMG,
+};
+
+static struct intel_vsec_platform_info xe_vsec_info[] = {
+ [XE_VSEC_BMG] = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_CRASHLOG,
+ .headers = bmg_capabilities,
+ },
+ { }
+};
+
+/*
+ * The GUID will have the following bits to decode:
+ * [0:3] - {Telemetry space iteration number (0,1,..)}
+ * [4:7] - Segment (SEGMENT_INDEPENDENT-0, Client-1, Server-2)
+ * [8:11] - SOC_SKU
+ * [12:27] - Device ID - changes for each down-bin SKU
+ * [28:29] - Capability Type (Crashlog-0, Telemetry Aggregator-1, Watcher-2)
+ * [30:31] - Record-ID (0-PUNIT, 1-OOBMSM_0, 2-OOBMSM_1)
+ */
+#define GUID_TELEM_ITERATION GENMASK(3, 0)
+#define GUID_SEGMENT GENMASK(7, 4)
+#define GUID_SOC_SKU GENMASK(11, 8)
+#define GUID_DEVICE_ID GENMASK(27, 12)
+#define GUID_CAP_TYPE GENMASK(29, 28)
+#define GUID_RECORD_ID GENMASK(31, 30)
+
+#define PUNIT_TELEMETRY_OFFSET 0x0200
+#define PUNIT_WATCHER_OFFSET 0x14A0
+#define OOBMSM_0_WATCHER_OFFSET 0x18D8
+#define OOBMSM_1_TELEMETRY_OFFSET 0x1000
+
+enum record_id {
+ PUNIT,
+ OOBMSM_0,
+ OOBMSM_1,
+};
+
+enum capability {
+ CRASHLOG,
+ TELEMETRY,
+ WATCHER,
+};
+
+static int xe_guid_decode(u32 guid, int *index, u32 *offset)
+{
+ u32 record_id = FIELD_GET(GUID_RECORD_ID, guid);
+ u32 cap_type = FIELD_GET(GUID_CAP_TYPE, guid);
+ u32 device_id = FIELD_GET(GUID_DEVICE_ID, guid);
+
+ if (device_id != BMG_DEVICE_ID)
+ return -ENODEV;
+
+ if (cap_type > WATCHER)
+ return -EINVAL;
+
+ *offset = 0;
+
+ if (cap_type == CRASHLOG) {
+ *index = record_id == PUNIT ? 2 : 4;
+ return 0;
+ }
+
+ switch (record_id) {
+ case PUNIT:
+ *index = 0;
+ if (cap_type == TELEMETRY)
+ *offset = PUNIT_TELEMETRY_OFFSET;
+ else
+ *offset = PUNIT_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_0:
+ *index = 1;
+ if (cap_type == WATCHER)
+ *offset = OOBMSM_0_WATCHER_OFFSET;
+ break;
+
+ case OOBMSM_1:
+ *index = 1;
+ if (cap_type == TELEMETRY)
+ *offset = OOBMSM_1_TELEMETRY_OFFSET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
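A worked example of the GUID layout described above (values chosen for illustration; only BMG_DEVICE_ID is real): a GUID carrying the BMG device ID, capability type TELEMETRY and record ID OOBMSM_1 decodes to index 1 and offset OOBMSM_1_TELEMETRY_OFFSET:

	/* Illustrative sketch: compose and decode a telemetry GUID. */
	static void guid_decode_example(void)
	{
		u32 guid = FIELD_PREP(GUID_DEVICE_ID, BMG_DEVICE_ID) |
			   FIELD_PREP(GUID_CAP_TYPE, TELEMETRY) |
			   FIELD_PREP(GUID_RECORD_ID, OOBMSM_1);
		u32 offset;
		int index;

		WARN_ON(xe_guid_decode(guid, &index, &offset));
		/* index == 1, offset == OOBMSM_1_TELEMETRY_OFFSET */
	}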
+
+static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
+ u32 mem_region;
+ u32 offset;
+ int ret;
+
+ ret = xe_guid_decode(guid, &mem_region, &offset);
+ if (ret)
+ return ret;
+
+ telem_addr += offset + user_offset;
+
+ guard(mutex)(&xe->pmt.lock);
+
+ /* return -ENODATA to indicate that we are not at an appropriate power level */
+ if (!xe_pm_runtime_get_if_active(xe))
+ return -ENODATA;
+
+ /* set SoC re-mapper index register based on GUID memory region */
+ xe_mmio_rmw32(xe_root_tile_mmio(xe), SG_REMAP_INDEX1, SG_REMAP_BITS,
+ REG_FIELD_PREP(SG_REMAP_BITS, mem_region));
+
+ memcpy_fromio(data, telem_addr, count);
+ xe_pm_runtime_put(xe);
+
+ return count;
+}
+
+static struct pmt_callbacks xe_pmt_cb = {
+ .read_telem = xe_pmt_telem_read,
+};
+
+static const int vsec_platforms[] = {
+ [XE_BATTLEMAGE] = XE_VSEC_BMG,
+};
+
+static enum xe_vsec get_platform_info(struct xe_device *xe)
+{
+ if (xe->info.platform > XE_BATTLEMAGE)
+ return XE_VSEC_UNKNOWN;
+
+ return vsec_platforms[xe->info.platform];
+}
+
+/**
+ * xe_vsec_init - Initialize resources and add intel_vsec auxiliary
+ * interface
+ * @xe: valid xe instance
+ */
+void xe_vsec_init(struct xe_device *xe)
+{
+ struct intel_vsec_platform_info *info;
+ struct device *dev = xe->drm.dev;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ enum xe_vsec platform;
+
+ platform = get_platform_info(xe);
+ if (platform == XE_VSEC_UNKNOWN)
+ return;
+
+ info = &xe_vsec_info[platform];
+ if (!info->headers)
+ return;
+
+ switch (platform) {
+ case XE_VSEC_BMG:
+ info->priv_data = &xe_pmt_cb;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Register a VSEC. Cleanup is handled using device managed
+ * resources.
+ */
+ intel_vsec_register(pdev, info);
+}
+MODULE_IMPORT_NS("INTEL_VSEC");
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
new file mode 100644
index 000000000000..5777c53faec2
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright © 2024 Intel Corporation */
+
+#ifndef _XE_VSEC_H_
+#define _XE_VSEC_H_
+
+struct xe_device;
+
+void xe_vsec_init(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 353936a0f877..67196baa4249 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -8,6 +8,7 @@
#include <drm/drm_managed.h>
#include <kunit/visibility.h>
#include <linux/compiler_types.h>
+#include <linux/fault-inject.h>
#include <generated/xe_wa_oob.h>
@@ -229,6 +230,18 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
+ /* Xe2_HPG */
+
+ { XE_RTP_NAME("16025250150"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_ACTIONS(SET(LSN_VC_REG2,
+ LSN_LNI_WGT(1) |
+ LSN_LNE_WGT(1) |
+ LSN_DIM_X_WGT(1) |
+ LSN_DIM_Y_WGT(1) |
+ LSN_DIM_Z_WGT(1)))
+ },
+
/* Xe2_HPM */
{ XE_RTP_NAME("16021867713"),
@@ -251,7 +264,33 @@ static const struct xe_rtp_entry_sr gt_was[] = {
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
},
- {}
+ /* Xe3_LPG */
+
+ { XE_RTP_NAME("14021871409"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)),
+ XE_RTP_ACTIONS(SET(UNSLCGCTL9454, LSCFE_CLKGATE_DIS))
+ },
+
+ /* Xe3_LPM */
+
+ { XE_RTP_NAME("16021867713"),
+ XE_RTP_RULES(MEDIA_VERSION(3000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("16021865536"),
+ XE_RTP_RULES(MEDIA_VERSION(3000),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
+ { XE_RTP_NAME("14021486841"),
+ XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0),
+ ENGINE_CLASS(VIDEO_DECODE)),
+ XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)),
+ XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
+ },
};
static const struct xe_rtp_entry_sr engine_was[] = {
@@ -567,10 +606,57 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
- {}
+ /* Xe3_LPG */
+
+ { XE_RTP_NAME("14021402888"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+ },
+ { XE_RTP_NAME("18034896535"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(ROW_CHICKEN4, DISABLE_TDL_PUSH))
+ },
+ { XE_RTP_NAME("16024792527"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(FIELD_SET(SAMPLER_MODE, SMP_WAIT_FETCH_MERGING_COUNTER,
+ SMP_FORCE_128B_OVERFETCH))
+ },
+ { XE_RTP_NAME("14023061436"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
+ },
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
+ { XE_RTP_NAME("16023105232"),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 3000), OR,
+ GRAPHICS_VERSION_RANGE(2001, 3001)),
+ XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE,
+ XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
+ { XE_RTP_NAME("16011163337"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored due to 1608008084. */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+ FF_MODE2_GS_TIMER_MASK,
+ FF_MODE2_GS_TIMER_224))
+ },
+ { XE_RTP_NAME("1604555607"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
+ /* read verification is ignored due to 1608008084. */
+ XE_RTP_ACTIONS(FIELD_SET_NO_READ_MASK(FF_MODE2,
+ FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128))
+ },
{ XE_RTP_NAME("1409342910, 14010698770, 14010443199, 1408979724, 1409178076, 1409207793, 1409217633, 1409252684, 1409347922, 1409142259"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN3,
@@ -741,8 +827,22 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
- {}
+ /* Xe3_LPG */
+ { XE_RTP_NAME("14021490052"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(FF_MODE,
+ DIS_MESH_PARTIAL_AUTOSTRIP |
+ DIS_MESH_AUTOSTRIP),
+ SET(VFLSKPD,
+ DIS_PARTIAL_AUTOSTRIP |
+ DIS_AUTOSTRIP))
+ },
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -784,7 +884,7 @@ void xe_wa_process_gt(struct xe_gt *gt)
xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.gt,
ARRAY_SIZE(gt_was));
- xe_rtp_process_to_sr(&ctx, gt_was, &gt->reg_sr);
+ xe_rtp_process_to_sr(&ctx, gt_was, ARRAY_SIZE(gt_was), &gt->reg_sr);
}
EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt);
@@ -802,7 +902,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.engine,
ARRAY_SIZE(engine_was));
- xe_rtp_process_to_sr(&ctx, engine_was, &hwe->reg_sr);
+ xe_rtp_process_to_sr(&ctx, engine_was, ARRAY_SIZE(engine_was), &hwe->reg_sr);
}
/**
@@ -819,7 +919,7 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe)
xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc,
ARRAY_SIZE(lrc_was));
- xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc);
+ xe_rtp_process_to_sr(&ctx, lrc_was, ARRAY_SIZE(lrc_was), &hwe->reg_lrc);
}
/**
@@ -854,6 +954,7 @@ int xe_wa_init(struct xe_gt *gt)
return 0;
}
+ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
{
@@ -891,11 +992,11 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
*/
void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
{
- struct xe_gt *mmio = tile->primary_gt;
+ struct xe_mmio *mmio = &tile->mmio;
if (IS_SRIOV_VF(tile->xe))
return;
- if (XE_WA(mmio, 22010954014))
+ if (XE_WA(tile->primary_gt, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
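
The xe_wa.c hunks above drop the empty {} terminator entries from the gt_was[], engine_was[] and lrc_was[] tables and instead pass ARRAY_SIZE() into xe_rtp_process_to_sr(). A small stand-alone C sketch of the two iteration styles; the demo_* names are made up and are not the real xe_rtp types:

    #include <stddef.h>
    #include <stdio.h>

    struct demo_entry {                     /* stand-in for struct xe_rtp_entry_sr */
            const char *name;
    };

    /* Old style: walk the table until the all-zero sentinel entry. */
    static void process_with_sentinel(const struct demo_entry *tbl)
    {
            for (; tbl->name; tbl++)
                    printf("apply %s\n", tbl->name);
    }

    /* New style: the caller passes the number of entries explicitly. */
    static void process_with_count(const struct demo_entry *tbl, size_t n)
    {
            for (size_t i = 0; i < n; i++)
                    printf("apply %s\n", tbl[i].name);
    }

    int main(void)
    {
            /* Sentinel-terminated: needs the trailing empty entry. */
            static const struct demo_entry old_style[] = {
                    { "16021867713" },
                    { NULL },
            };
            /* Explicitly sized: no terminator, the count comes from sizeof. */
            static const struct demo_entry new_style[] = {
                    { "14021402888" },
                    { "18034896535" },
            };

            process_with_sentinel(old_style);
            process_with_count(new_style, sizeof(new_style) / sizeof(new_style[0]));
            return 0;
    }

One effect visible in the hunks: the same ARRAY_SIZE() value already feeds xe_rtp_process_ctx_enable_active_tracking(), so sizing the tables explicitly keeps the two in step.
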
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 920ca5060146..9efc5accd43d 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,9 +1,11 @@
+1607983814 GRAPHICS_VERSION_RANGE(1200, 1210)
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
22011391025 PLATFORM(DG2)
22012727170 SUBPLATFORM(DG2, G11)
22012727685 SUBPLATFORM(DG2, G11)
+22016596838 PLATFORM(PVC)
18020744125 PLATFORM(PVC)
1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
@@ -27,13 +29,33 @@
16022287689 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION(3001)
14022293748 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019794406 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
+ GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
+no_media_l3 MEDIA_VERSION(3000)
+14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
+16021333562 GRAPHICS_VERSION_RANGE(1200, 1274)
+ MEDIA_VERSION(1300)
+14016712196 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+14015568240 GRAPHICS_VERSION_RANGE(1255, 1260)
+18013179988 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION_RANGE(1270, 1274)
+1508761755 GRAPHICS_VERSION(1255)
+ GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
+16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
+ MEDIA_VERSION_RANGE(1301, 3000)
+16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
+ MEDIA_VERSION_RANGE(1300, 3000)
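
These rules feed the build-time generated <generated/xe_wa_oob.h> header, and C code consumes them through the XE_WA() guard, as in the xe_wa_apply_tile_workarounds() hunk above. A hedged sketch of that consumer side; the include list is abbreviated, the function name is illustrative, and the register names are taken from that hunk:

    #include <generated/xe_wa_oob.h>        /* generated from xe_wa_oob.rules */

    #include "xe_mmio.h"
    #include "xe_wa.h"
    /* XEHP_CLOCK_GATE_DIS / SGSI_SIDECLK_DIS come from the xe regs headers. */

    static void example_apply_oob_wa(struct xe_tile *tile)
    {
            /* XE_WA() tests the per-GT bit recorded when the rules matched. */
            if (XE_WA(tile->primary_gt, 22010954014))
                    xe_mmio_rmw32(&tile->mmio, XEHP_CLOCK_GATE_DIS, 0,
                                  SGSI_SIDECLK_DIS);
    }
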
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index f5deb81eba01..5b4264ea38bd 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -155,6 +155,13 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
}
if (!timeout) {
+ LNL_FLUSH_WORKQUEUE(xe->ordered_wq);
+ err = do_compare(addr, args->value, args->mask,
+ args->op);
+ if (err <= 0) {
+ drm_dbg(&xe->drm, "LNL_FLUSH_WORKQUEUE resolved ufence timeout\n");
+ break;
+ }
err = -ETIME;
break;
}
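
The added lines implement a flush-then-recheck step: before a zero timeout is turned into -ETIME, the ordered workqueue is flushed and the fence condition is evaluated once more, in case the signal was only sitting in a queued work item. A generic sketch of the same pattern, with placeholder names rather than xe's helpers:

    #include <linux/workqueue.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* cond_satisfied() and wq are placeholders; this is not xe code. */
    static int example_wait_tail(struct workqueue_struct *wq,
                                 bool (*cond_satisfied)(void *), void *arg)
    {
            /*
             * The wait timed out, but the update we are waiting for may only
             * be pending in a not-yet-run work item.  Flush the queue and
             * look again before reporting a user-visible timeout.
             */
            flush_workqueue(wq);
            if (cond_satisfied(arg))
                    return 0;

            return -ETIME;
    }
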
diff --git a/drivers/gpu/drm/xe/xe_wopcm.c b/drivers/gpu/drm/xe/xe_wopcm.c
index d3a99157e523..ada0d0aa6b74 100644
--- a/drivers/gpu/drm/xe/xe_wopcm.c
+++ b/drivers/gpu/drm/xe/xe_wopcm.c
@@ -5,6 +5,8 @@
#include "xe_wopcm.h"
+#include <linux/fault-inject.h>
+
#include "regs/xe_guc_regs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
@@ -123,8 +125,8 @@ static bool __check_layout(struct xe_device *xe, u32 wopcm_size,
static bool __wopcm_regs_locked(struct xe_gt *gt,
u32 *guc_wopcm_base, u32 *guc_wopcm_size)
{
- u32 reg_base = xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET);
- u32 reg_size = xe_mmio_read32(gt, GUC_WOPCM_SIZE);
+ u32 reg_base = xe_mmio_read32(&gt->mmio, DMA_GUC_WOPCM_OFFSET);
+ u32 reg_size = xe_mmio_read32(&gt->mmio, GUC_WOPCM_SIZE);
if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
!(reg_base & GUC_WOPCM_OFFSET_VALID))
@@ -150,13 +152,13 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt,
XE_WARN_ON(size & ~GUC_WOPCM_SIZE_MASK);
mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
- err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask,
+ err = xe_mmio_write32_and_verify(&gt->mmio, GUC_WOPCM_SIZE, size, mask,
size | GUC_WOPCM_SIZE_LOCKED);
if (err)
goto err_out;
mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
- err = xe_mmio_write32_and_verify(gt, DMA_GUC_WOPCM_OFFSET,
+ err = xe_mmio_write32_and_verify(&gt->mmio, DMA_GUC_WOPCM_OFFSET,
base | huc_agent, mask,
base | huc_agent |
GUC_WOPCM_OFFSET_VALID);
@@ -169,10 +171,10 @@ err_out:
drm_notice(&xe->drm, "Failed to init uC WOPCM registers!\n");
drm_notice(&xe->drm, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
DMA_GUC_WOPCM_OFFSET.addr,
- xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET));
+ xe_mmio_read32(&gt->mmio, DMA_GUC_WOPCM_OFFSET));
drm_notice(&xe->drm, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
GUC_WOPCM_SIZE.addr,
- xe_mmio_read32(gt, GUC_WOPCM_SIZE));
+ xe_mmio_read32(&gt->mmio, GUC_WOPCM_SIZE));
return err;
}
@@ -268,3 +270,4 @@ check:
return ret;
}
+ALLOW_ERROR_INJECTION(xe_wopcm_init, ERRNO); /* See xe_pci_probe() */
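
The GuC WOPCM setup above leans on xe_mmio_write32_and_verify(): write a value, read the register back, and check the masked readback (for example, that the LOCKED bit actually stuck). A tiny user-space sketch of that write-then-verify idiom; the demo_* names are placeholders and no claim is made about the real helper's exact error code:

    #include <stdint.h>
    #include <errno.h>

    static uint32_t demo_reg;               /* stands in for one MMIO register */

    static void demo_write32(uint32_t val)  { demo_reg = val; }
    static uint32_t demo_read32(void)       { return demo_reg; }

    /* Write, read back, and fail unless the masked readback matches. */
    static int demo_write32_and_verify(uint32_t val, uint32_t mask,
                                       uint32_t expect)
    {
            demo_write32(val);
            return (demo_read32() & mask) == expect ? 0 : -EIO;
    }

    int main(void)
    {
            /* e.g. program a size and insist that a LOCKED-style bit reads back */
            return demo_write32_and_verify(0x00100001u, 0x1u, 0x1u);
    }
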