Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
145 files changed, 6421 insertions, 2714 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index ee85e8aba636..c56320e78c0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -51,12 +51,15 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ - amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ - amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ - amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ + amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \ + amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \ + amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o \ + amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ - amdgpu_fw_attestation.o amdgpu_securedisplay.o + amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o + +amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o @@ -71,7 +74,8 @@ amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \ arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \ - nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o + nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \ + beige_goby_reg_init.o yellow_carp_reg_init.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index 65b1dca4b02e..148f6c3343ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -227,7 +227,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) break; default: break; - }; + } } /* Reinit NBIF block */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 264176a01e16..c0316eaba547 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -107,6 +107,7 @@ #include "amdgpu_gfxhub.h" #include "amdgpu_df.h" #include "amdgpu_smuio.h" +#include "amdgpu_fdinfo.h" #define MAX_GPU_INSTANCE 16 @@ -129,6 +130,13 @@ struct amdgpu_mgpu_info bool pending_reset; }; +enum amdgpu_ss { + AMDGPU_SS_DRV_LOAD, + AMDGPU_SS_DEV_D0, + AMDGPU_SS_DEV_D3, + AMDGPU_SS_DRV_UNLOAD +}; + struct amdgpu_watchdog_timer { bool timeout_fatal_disable; @@ -203,6 +211,7 @@ extern int amdgpu_discovery; extern int amdgpu_mes; extern int amdgpu_noretry; extern int amdgpu_force_asic_type; +extern int amdgpu_smartshift_bias; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; @@ -260,6 +269,10 @@ extern int amdgpu_num_kcq; #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 +/* smasrt shift bias level limits */ +#define AMDGPU_SMARTSHIFT_MAX_BIAS (100) +#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) + struct amdgpu_device; struct amdgpu_ib; struct amdgpu_cs_parser; @@ -267,7 +280,6 @@ struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; struct amdgpu_bo_va_mapping; -struct amdgpu_atif; struct kfd_vm_fault_info; struct amdgpu_hive_info; struct 
amdgpu_reset_context; @@ -682,20 +694,6 @@ struct amdgpu_vram_scratch { }; /* - * ACPI - */ -struct amdgpu_atcs_functions { - bool get_ext_state; - bool pcie_perf_req; - bool pcie_dev_rdy; - bool pcie_bus_width; -}; - -struct amdgpu_atcs { - struct amdgpu_atcs_functions functions; -}; - -/* * CGS */ struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); @@ -824,8 +822,6 @@ struct amdgpu_device { struct notifier_block acpi_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; struct debugfs_blob_wrapper debugfs_vbios_blob; - struct amdgpu_atif *atif; - struct amdgpu_atcs atcs; struct mutex srbm_mutex; /* GRBM index mutex. Protects concurrent access to GRBM index */ struct mutex grbm_idx_mutex; @@ -1074,9 +1070,10 @@ struct amdgpu_device { atomic_t throttling_logging_enabled; struct ratelimit_state throttling_logging_rs; - uint32_t ras_features; + uint32_t ras_hw_enabled; + uint32_t ras_enabled; - bool in_pci_err_recovery; + bool no_hw_access; struct pci_saved_state *pci_state; struct amdgpu_reset_control *reset_cntl; @@ -1099,7 +1096,9 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev) int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags); -void amdgpu_device_fini(struct amdgpu_device *adev); +void amdgpu_device_fini_hw(struct amdgpu_device *adev); +void amdgpu_device_fini_sw(struct amdgpu_device *adev); + int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, @@ -1142,6 +1141,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); * Registers read & write functions. */ #define AMDGPU_REGS_NO_KIQ (1<<1) +#define AMDGPU_REGS_RLC (1<<2) #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) @@ -1278,12 +1278,18 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev); bool amdgpu_device_supports_atpx(struct drm_device *dev); bool amdgpu_device_supports_px(struct drm_device *dev); bool amdgpu_device_supports_boco(struct drm_device *dev); +bool amdgpu_device_supports_smart_shift(struct drm_device *dev); bool amdgpu_device_supports_baco(struct drm_device *dev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_device_baco_enter(struct drm_device *dev); int amdgpu_device_baco_exit(struct drm_device *dev); +void amdgpu_device_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring); +void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring); + /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); @@ -1319,6 +1325,8 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev); int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); +void amdgpu_driver_release_kms(struct drm_device *dev); + int amdgpu_device_ip_suspend(struct amdgpu_device *adev); int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); int amdgpu_device_resume(struct drm_device *dev, bool fbcon); @@ -1350,21 +1358,38 @@ struct amdgpu_afmt_acr { struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); /* amdgpu_acpi.c */ + +/* ATCS Device/Driver State */ +#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0 +#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3 +#define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0 +#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1 + #if 
defined(CONFIG_ACPI) int amdgpu_acpi_init(struct amdgpu_device *adev); void amdgpu_acpi_fini(struct amdgpu_device *adev); bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); +bool amdgpu_acpi_is_power_shift_control_supported(void); int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, u8 perf_req, bool advertise); +int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, + u8 dev_state, bool drv_state); +int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state); int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); -void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, - struct amdgpu_dm_backlight_caps *caps); +void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev); +void amdgpu_acpi_detect(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; } +static inline void amdgpu_acpi_detect(void) { } +static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } +static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, + u8 dev_state, bool drv_state) { return 0; } +static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, + enum amdgpu_ss ss_state) { return 0; } #endif int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 2e9b16fb3fcd..84a1b4bc9bb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -71,12 +71,31 @@ struct amdgpu_atif { struct amdgpu_dm_backlight_caps backlight_caps; }; +struct amdgpu_atcs_functions { + bool get_ext_state; + bool pcie_perf_req; + bool pcie_dev_rdy; + bool pcie_bus_width; + bool power_shift_control; +}; + +struct amdgpu_atcs { + acpi_handle handle; + + struct amdgpu_atcs_functions functions; +}; + +static struct amdgpu_acpi_priv { + struct amdgpu_atif atif; + struct amdgpu_atcs atcs; +} amdgpu_acpi_priv; + /* Call the ATIF method */ /** * amdgpu_atif_call - call an ATIF method * - * @handle: acpi handle + * @atif: atif structure * @function: the ATIF function to execute * @params: ATIF function params * @@ -166,7 +185,6 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas /** * amdgpu_atif_verify_interface - verify ATIF * - * @handle: acpi handle * @atif: amdgpu atif struct * * Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function @@ -208,40 +226,10 @@ out: return err; } -static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle) -{ - acpi_handle handle = NULL; - char acpi_method_name[255] = { 0 }; - struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; - acpi_status status; - - /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only - * systems, ATIF is in the dGPU's namespace. 
- */ - status = acpi_get_handle(dhandle, "ATIF", &handle); - if (ACPI_SUCCESS(status)) - goto out; - - if (amdgpu_has_atpx()) { - status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF", - &handle); - if (ACPI_SUCCESS(status)) - goto out; - } - - DRM_DEBUG_DRIVER("No ATIF handle found\n"); - return NULL; -out: - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); - DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name); - return handle; -} - /** * amdgpu_atif_get_notification_params - determine notify configuration * - * @handle: acpi handle - * @n: atif notification configuration struct + * @atif: acpi handle * * Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function * to determine if a notifier is used and if so which one @@ -304,7 +292,7 @@ out: /** * amdgpu_atif_query_backlight_caps - get min and max backlight input signal * - * @handle: acpi handle + * @atif: acpi handle * * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function * to determine the acceptable range of backlight values @@ -363,7 +351,7 @@ out: /** * amdgpu_atif_get_sbios_requests - get requested sbios event * - * @handle: acpi handle + * @atif: acpi handle * @req: atif sbios request struct * * Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function @@ -416,7 +404,7 @@ out: static int amdgpu_atif_handler(struct amdgpu_device *adev, struct acpi_bus_event *event) { - struct amdgpu_atif *atif = adev->atif; + struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif; int count; DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n", @@ -426,8 +414,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, return NOTIFY_DONE; /* Is this actually our event? */ - if (!atif || - !atif->notification_cfg.enabled || + if (!atif->notification_cfg.enabled || event->type != atif->notification_cfg.command_code) { /* These events will generate keypresses otherwise */ if (event->type == ACPI_VIDEO_NOTIFY_PROBE) @@ -487,14 +474,15 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev, /** * amdgpu_atcs_call - call an ATCS method * - * @handle: acpi handle + * @atcs: atcs structure * @function: the ATCS function to execute * @params: ATCS function params * * Executes the requested ATCS function (all asics). * Returns a pointer to the acpi output buffer. 
*/ -static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function, +static union acpi_object *amdgpu_atcs_call(struct amdgpu_atcs *atcs, + int function, struct acpi_buffer *params) { acpi_status status; @@ -518,7 +506,7 @@ static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function, atcs_arg_elements[1].integer.value = 0; } - status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer); + status = acpi_evaluate_object(atcs->handle, NULL, &atcs_arg, &buffer); /* Fail only if calling the method fails and ATIF is supported */ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { @@ -547,12 +535,12 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED; f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED; f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED; + f->power_shift_control = mask & ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED; } /** * amdgpu_atcs_verify_interface - verify ATCS * - * @handle: acpi handle * @atcs: amdgpu atcs struct * * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function @@ -560,15 +548,14 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas * (all asics). * returns 0 on success, error on failure. */ -static int amdgpu_atcs_verify_interface(acpi_handle handle, - struct amdgpu_atcs *atcs) +static int amdgpu_atcs_verify_interface(struct amdgpu_atcs *atcs) { union acpi_object *info; struct atcs_verify_interface output; size_t size; int err = 0; - info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL); + info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_VERIFY_INTERFACE, NULL); if (!info) return -EIO; @@ -605,7 +592,7 @@ out: */ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev) { - struct amdgpu_atcs *atcs = &adev->atcs; + struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs; if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy) return true; @@ -614,6 +601,18 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade } /** + * amdgpu_acpi_is_power_shift_control_supported + * + * Check if the ATCS power shift control method + * is supported. + * returns true if supported, false if not. 
+ */ +bool amdgpu_acpi_is_power_shift_control_supported(void) +{ + return amdgpu_acpi_priv.atcs.functions.power_shift_control; +} + +/** * amdgpu_acpi_pcie_notify_device_ready * * @adev: amdgpu_device pointer @@ -624,19 +623,13 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade */ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev) { - acpi_handle handle; union acpi_object *info; - struct amdgpu_atcs *atcs = &adev->atcs; - - /* Get the device handle */ - handle = ACPI_HANDLE(&adev->pdev->dev); - if (!handle) - return -EINVAL; + struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs; if (!atcs->functions.pcie_dev_rdy) return -EINVAL; - info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL); + info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL); if (!info) return -EIO; @@ -659,9 +652,8 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev) int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, u8 perf_req, bool advertise) { - acpi_handle handle; union acpi_object *info; - struct amdgpu_atcs *atcs = &adev->atcs; + struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs; struct atcs_pref_req_input atcs_input; struct atcs_pref_req_output atcs_output; struct acpi_buffer params; @@ -671,11 +663,6 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, if (amdgpu_acpi_pcie_notify_device_ready(adev)) return -EINVAL; - /* Get the device handle */ - handle = ACPI_HANDLE(&adev->pdev->dev); - if (!handle) - return -EINVAL; - if (!atcs->functions.pcie_perf_req) return -EINVAL; @@ -693,7 +680,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, params.pointer = &atcs_input; while (retry--) { - info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, ¶ms); + info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, ¶ms); if (!info) return -EIO; @@ -727,6 +714,96 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, } /** + * amdgpu_acpi_power_shift_control + * + * @adev: amdgpu_device pointer + * @dev_state: device acpi state + * @drv_state: driver state + * + * Executes the POWER_SHIFT_CONTROL method to + * communicate current dGPU device state and + * driver state to APU/SBIOS. + * returns 0 on success, error on failure. + */ +int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, + u8 dev_state, bool drv_state) +{ + union acpi_object *info; + struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs; + struct atcs_pwr_shift_input atcs_input; + struct acpi_buffer params; + + if (!amdgpu_acpi_is_power_shift_control_supported()) + return -EINVAL; + + atcs_input.size = sizeof(struct atcs_pwr_shift_input); + /* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */ + atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8); + atcs_input.dev_acpi_state = dev_state; + atcs_input.drv_state = drv_state; + + params.length = sizeof(struct atcs_pwr_shift_input); + params.pointer = &atcs_input; + + info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, ¶ms); + if (!info) { + DRM_ERROR("ATCS PSC update failed\n"); + return -EIO; + } + + return 0; +} + +/** + * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS + * + * @dev: drm_device pointer + * @ss_state: current smart shift event + * + * returns 0 on success, + * otherwise return error number. 
+ */ +int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + int r; + + if (!amdgpu_device_supports_smart_shift(dev)) + return 0; + + switch (ss_state) { + /* SBIOS trigger “stop”, “enable” and “start” at D0, Driver Operational. + * SBIOS trigger “stop” at D3, Driver Not Operational. + * SBIOS trigger “stop” and “disable” at D0, Driver NOT operational. + */ + case AMDGPU_SS_DRV_LOAD: + r = amdgpu_acpi_power_shift_control(adev, + AMDGPU_ATCS_PSC_DEV_STATE_D0, + AMDGPU_ATCS_PSC_DRV_STATE_OPR); + break; + case AMDGPU_SS_DEV_D0: + r = amdgpu_acpi_power_shift_control(adev, + AMDGPU_ATCS_PSC_DEV_STATE_D0, + AMDGPU_ATCS_PSC_DRV_STATE_OPR); + break; + case AMDGPU_SS_DEV_D3: + r = amdgpu_acpi_power_shift_control(adev, + AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT, + AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR); + break; + case AMDGPU_SS_DRV_UNLOAD: + r = amdgpu_acpi_power_shift_control(adev, + AMDGPU_ATCS_PSC_DEV_STATE_D0, + AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR); + break; + default: + return -EINVAL; + } + + return r; +} + +/** * amdgpu_acpi_event - handle notify events * * @nb: notifier block @@ -769,50 +846,15 @@ static int amdgpu_acpi_event(struct notifier_block *nb, */ int amdgpu_acpi_init(struct amdgpu_device *adev) { - acpi_handle handle, atif_handle; - struct amdgpu_atif *atif; - struct amdgpu_atcs *atcs = &adev->atcs; - int ret; - - /* Get the device handle */ - handle = ACPI_HANDLE(&adev->pdev->dev); - - if (!adev->bios || !handle) - return 0; - - /* Call the ATCS method */ - ret = amdgpu_atcs_verify_interface(handle, atcs); - if (ret) { - DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret); - } - - /* Probe for ATIF, and initialize it if found */ - atif_handle = amdgpu_atif_probe_handle(handle); - if (!atif_handle) - goto out; - - atif = kzalloc(sizeof(*atif), GFP_KERNEL); - if (!atif) { - DRM_WARN("Not enough memory to initialize ATIF\n"); - goto out; - } - atif->handle = atif_handle; - - /* Call the ATIF method */ - ret = amdgpu_atif_verify_interface(atif); - if (ret) { - DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret); - kfree(atif); - goto out; - } - adev->atif = atif; + struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif; #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) if (atif->notifications.brightness_change) { if (amdgpu_device_has_dc_support(adev)) { #if defined(CONFIG_DRM_AMD_DC) struct amdgpu_display_manager *dm = &adev->dm; - atif->bd = dm->backlight_dev; + if (dm->backlight_dev) + atif->bd = dm->backlight_dev; #endif } else { struct drm_encoder *tmp; @@ -834,6 +876,129 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) } } #endif + adev->acpi_nb.notifier_call = amdgpu_acpi_event; + register_acpi_notifier(&adev->acpi_nb); + + return 0; +} + +void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) +{ + struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif; + + caps->caps_valid = atif->backlight_caps.caps_valid; + caps->min_input_signal = atif->backlight_caps.min_input_signal; + caps->max_input_signal = atif->backlight_caps.max_input_signal; +} + +/** + * amdgpu_acpi_fini - tear down driver acpi support + * + * @adev: amdgpu_device pointer + * + * Unregisters with the acpi notifier chain (all asics). 
+ */ +void amdgpu_acpi_fini(struct amdgpu_device *adev) +{ + unregister_acpi_notifier(&adev->acpi_nb); +} + +/** + * amdgpu_atif_pci_probe_handle - look up the ATIF handle + * + * @pdev: pci device + * + * Look up the ATIF handles (all asics). + * Returns true if the handle is found, false if not. + */ +static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + acpi_handle dhandle, atif_handle; + acpi_status status; + int ret; + + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "ATIF", &atif_handle); + if (ACPI_FAILURE(status)) { + return false; + } + amdgpu_acpi_priv.atif.handle = atif_handle; + acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name); + ret = amdgpu_atif_verify_interface(&amdgpu_acpi_priv.atif); + if (ret) { + amdgpu_acpi_priv.atif.handle = 0; + return false; + } + return true; +} + +/** + * amdgpu_atcs_pci_probe_handle - look up the ATCS handle + * + * @pdev: pci device + * + * Look up the ATCS handles (all asics). + * Returns true if the handle is found, false if not. + */ +static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name }; + acpi_handle dhandle, atcs_handle; + acpi_status status; + int ret; + + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "ATCS", &atcs_handle); + if (ACPI_FAILURE(status)) { + return false; + } + amdgpu_acpi_priv.atcs.handle = atcs_handle; + acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer); + DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name); + ret = amdgpu_atcs_verify_interface(&amdgpu_acpi_priv.atcs); + if (ret) { + amdgpu_acpi_priv.atcs.handle = 0; + return false; + } + return true; +} + +/* + * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods + * + * Check if we have the ATIF/ATCS methods and populate + * the structures in the driver. 
+ */ +void amdgpu_acpi_detect(void) +{ + struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif; + struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs; + struct pci_dev *pdev = NULL; + int ret; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + if (!atif->handle) + amdgpu_atif_pci_probe_handle(pdev); + if (!atcs->handle) + amdgpu_atcs_pci_probe_handle(pdev); + } + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + if (!atif->handle) + amdgpu_atif_pci_probe_handle(pdev); + if (!atcs->handle) + amdgpu_atcs_pci_probe_handle(pdev); + } if (atif->functions.sbios_requests && !atif->functions.system_params) { /* XXX check this workraround, if sbios request function is @@ -863,42 +1028,13 @@ int amdgpu_acpi_init(struct amdgpu_device *adev) } else { atif->backlight_caps.caps_valid = false; } - -out: - adev->acpi_nb.notifier_call = amdgpu_acpi_event; - register_acpi_notifier(&adev->acpi_nb); - - return ret; -} - -void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev, - struct amdgpu_dm_backlight_caps *caps) -{ - if (!adev->atif) { - caps->caps_valid = false; - return; - } - caps->caps_valid = adev->atif->backlight_caps.caps_valid; - caps->min_input_signal = adev->atif->backlight_caps.min_input_signal; - caps->max_input_signal = adev->atif->backlight_caps.max_input_signal; -} - -/** - * amdgpu_acpi_fini - tear down driver acpi support - * - * @adev: amdgpu_device pointer - * - * Unregisters with the acpi notifier chain (all asics). - */ -void amdgpu_acpi_fini(struct amdgpu_device *adev) -{ - unregister_acpi_notifier(&adev->acpi_nb); - kfree(adev->atif); } /** * amdgpu_acpi_is_s0ix_supported * + * @adev: amdgpu_device_pointer + * * returns true if supported, false if not. */ bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5f6696a3c778..f9c01bdc3d4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -170,7 +170,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) } } -void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev) +void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev) { if (adev->kfd.dev) { kgd2kfd_device_exit(adev->kfd.dev); @@ -670,10 +670,10 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid) return 0; } -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid) +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, + enum TLB_FLUSH_TYPE flush_type) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - const uint32_t flush_type = 0; bool all_hub = false; if (adev->family == AMDGPU_FAMILY_AI) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 5ffb07b02810..db16b3e83694 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -36,13 +36,26 @@ extern uint64_t amdgpu_amdkfd_total_mem_size; +enum TLB_FLUSH_TYPE { + TLB_FLUSH_LEGACY = 0, + TLB_FLUSH_LIGHTWEIGHT, + TLB_FLUSH_HEAVYWEIGHT +}; + struct amdgpu_device; -struct kfd_bo_va_list { - struct list_head bo_list; - struct amdgpu_bo_va *bo_va; - void *kgd_dev; +enum kfd_mem_attachment_type { + KFD_MEM_ATT_SHARED, /* Share kgd_mem->bo or another attachment's */ + KFD_MEM_ATT_USERPTR, /* SG bo to DMA map pages from a userptr bo */ + KFD_MEM_ATT_DMABUF, /* DMAbuf to DMA map TTM BOs */ +}; + +struct kfd_mem_attachment 
{ + struct list_head list; + enum kfd_mem_attachment_type type; bool is_mapped; + struct amdgpu_bo_va *bo_va; + struct amdgpu_device *adev; uint64_t va; uint64_t pte_flags; }; @@ -50,7 +63,8 @@ struct kfd_bo_va_list { struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; - struct list_head bo_va_list; + struct dma_buf *dmabuf; + struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct ttm_validate_buffer validate_list; struct ttm_validate_buffer resv_list; @@ -75,6 +89,7 @@ struct amdgpu_amdkfd_fence { struct mm_struct *mm; spinlock_t lock; char timeline_name[TASK_COMM_LEN]; + struct svm_range_bo *svm_bo; }; struct amdgpu_kfd_dev { @@ -127,14 +142,15 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, const void *ih_ring_entry); void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); -void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev); +void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev); int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len); void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid); -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid); +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, + enum TLB_FLUSH_TYPE flush_type); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); @@ -148,7 +164,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int queue_bit); struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, - struct mm_struct *mm); + struct mm_struct *mm, + struct svm_range_bo *svm_bo); #if IS_ENABLED(CONFIG_HSA_AMD) bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); @@ -234,22 +251,27 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s }) /* GPUVM API */ +#define drm_priv_to_vm(drm_priv) \ + (&((struct amdgpu_fpriv *) \ + ((struct drm_file *)(drm_priv))->driver_priv)->vm) + int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, struct file *filp, u32 pasid, - void **vm, void **process_info, + void **process_info, struct dma_fence **ef); -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm); -uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm); +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv); +uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct kgd_dev *kgd, uint64_t va, uint64_t size, - void *vm, struct kgd_mem **mem, + void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags); int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( struct kgd_dev 
*kgd, struct kgd_mem *mem, bool intr); int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, @@ -260,7 +282,7 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, struct kfd_vm_fault_info *info); int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, struct dma_buf *dmabuf, - uint64_t va, void *vm, + uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset); int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, @@ -270,6 +292,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); +void amdgpu_amdkfd_reserve_system_mem(uint64_t size); #else static inline void amdgpu_amdkfd_gpuvm_init_mem_limits(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 9ef9f3ddad48..6409d6b1b2df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" +#include "amdgpu_amdkfd_arcturus.h" #include "sdma0/sdma0_4_2_2_offset.h" #include "sdma0/sdma0_4_2_2_sh_mask.h" #include "sdma1/sdma1_4_2_2_offset.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c index 5af464933976..1d0dbff87d3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/sched/mm.h> #include "amdgpu_amdkfd.h" +#include "kfd_svm.h" static const struct dma_fence_ops amdkfd_fence_ops; static atomic_t fence_seq = ATOMIC_INIT(0); @@ -60,7 +61,8 @@ static atomic_t fence_seq = ATOMIC_INIT(0); */ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, - struct mm_struct *mm) + struct mm_struct *mm, + struct svm_range_bo *svm_bo) { struct amdgpu_amdkfd_fence *fence; @@ -73,7 +75,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, fence->mm = mm; get_task_comm(fence->timeline_name, current); spin_lock_init(&fence->lock); - + fence->svm_bo = svm_bo; dma_fence_init(&fence->base, &amdkfd_fence_ops, &fence->lock, context, atomic_inc_return(&fence_seq)); @@ -111,6 +113,8 @@ static const char *amdkfd_fence_get_timeline_name(struct dma_fence *f) * a KFD BO and schedules a job to move the BO. * If fence is already signaled return true. * If fence is not signaled schedule a evict KFD process work item. + * + * @f: dma_fence */ static bool amdkfd_fence_enable_signaling(struct dma_fence *f) { @@ -122,16 +126,20 @@ static bool amdkfd_fence_enable_signaling(struct dma_fence *f) if (dma_fence_is_signaled(f)) return true; - if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f)) - return true; - + if (!fence->svm_bo) { + if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f)) + return true; + } else { + if (!svm_range_schedule_evict_svm_bo(fence)) + return true; + } return false; } /** * amdkfd_fence_release - callback that fence can be freed * - * @fence: fence + * @f: dma_fence * * This function is called when the reference count becomes zero. * Drops the mm_struct reference and RCU schedules freeing up the fence. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 62aa1a6f64ed..491acdf92f73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -96,8 +96,8 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, lock_srbm(kgd, 0, 0, 0, vmid); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); + WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ unlock_srbm(kgd); @@ -161,7 +161,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) lock_srbm(kgd, mec, pipe, 0, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), + WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); @@ -239,13 +239,13 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, for (reg = hqd_base; reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) - WREG32(reg, mqd_hqd[reg - hqd_base]); + WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]); /* Activate doorbell logic before triggering WPTR poll. */ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data); if (wptr) { /* Don't read wptr with get_user because the user @@ -274,27 +274,27 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, lower_32_bits(guessed_wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, upper_32_bits(guessed_wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, lower_32_bits((uint64_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, upper_32_bits((uint64_t)wptr)); pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), + WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), + WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR, REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); + WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); release_queue(kgd); @@ -365,7 +365,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ break; \ (*dump)[i][0] = (addr) << 2; \ - (*dump)[i++][1] = RREG32(addr); \ + (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \ } while (0) *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); @@ -497,13 +497,13 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t low, high; acquire_queue(kgd, pipe_id, queue_id); - act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + act = 
RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); high = upper_32_bits(queue_address >> 8); - if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) && - high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) + if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } release_queue(kgd); @@ -621,11 +621,11 @@ loop: preempt_enable(); #endif - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); + WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type); end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { - temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; if (time_after(jiffies, end_jiffies)) { @@ -716,8 +716,8 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, mutex_lock(&adev->grbm_idx_mutex); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); + WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd); data = REG_SET_FIELD(data, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -726,7 +726,7 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); mutex_unlock(&adev->grbm_idx_mutex); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index d39cff4a1fe3..1f5620cc3570 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -95,8 +95,8 @@ static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid, lock_srbm(kgd, 0, 0, 0, vmid); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); - WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); + WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); + WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ unlock_srbm(kgd); @@ -129,7 +129,7 @@ static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id) lock_srbm(kgd, mec, pipe, 0, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), + WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); @@ -212,10 +212,10 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); + value = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, ((mec << 5) | (pipe << 3) | queue_id | 0x80)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); + WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, value); } /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ @@ -224,13 +224,13 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, for (reg = hqd_base; reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) - WREG32(reg, mqd_hqd[reg - hqd_base]); + WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]); /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data); if (wptr) { /* Don't read wptr with get_user because the user @@ -259,17 +259,17 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, lower_32_bits(guessed_wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, upper_32_bits(guessed_wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, lower_32_bits((uint64_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, upper_32_bits((uint64_t)wptr)); pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__, (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), + WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); } @@ -279,7 +279,7 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); + WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); release_queue(kgd); @@ -350,7 +350,7 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd, if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ break; \ (*dump)[i][0] = (addr) << 2; \ - (*dump)[i++][1] = RREG32(addr); \ + (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \ } while (0) *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); @@ -482,13 +482,13 @@ static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, uint32_t low, high; acquire_queue(kgd, pipe_id, queue_id); - act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); high = upper_32_bits(queue_address >> 8); - if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) && - high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) + if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } release_queue(kgd); @@ -542,11 +542,11 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, break; } - WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); + WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type); end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { - temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; if (time_after(jiffies, end_jiffies)) { @@ -626,7 +626,7 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd, mutex_lock(&adev->grbm_idx_mutex); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); data = REG_SET_FIELD(data, GRBM_GFX_INDEX, @@ -636,7 +636,7 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd, data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - 
WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); mutex_unlock(&adev->grbm_idx_mutex); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index b43e68fc1378..ed3014fbb563 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -719,7 +719,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) } /** - * @get_wave_count: Read device registers to get number of waves in flight for + * get_wave_count: Read device registers to get number of waves in flight for * a particular queue. The method also returns the VMID associated with the * queue. * @@ -755,19 +755,19 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, } /** - * @kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each + * kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each * shader engine and aggregates the number of waves that are in flight for the * process whose pasid is provided as a parameter. The process could have ZERO * or more queues running and submitting waves to compute units. * * @kgd: Handle of device from which to get number of waves in flight * @pasid: Identifies the process for which this query call is invoked - * @wave_cnt: Output parameter updated with number of waves in flight that + * @pasid_wave_cnt: Output parameter updated with number of waves in flight that * belong to process with given pasid * @max_waves_per_cu: Output parameter updated with maximum number of waves * possible per Compute Unit * - * @note: It's possible that the device has too many queues (oversubscription) + * Note: It's possible that the device has too many queues (oversubscription) * in which case a VMID could be remapped to a different PASID. This could lead * to an iaccurate wave count. Following is a high-level sequence: * Time T1: vmid = getVmid(); vmid is associated with Pasid P1 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7d4118c8128a..3b8e1ee8c475 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -33,9 +33,6 @@ #include <uapi/linux/kfd_ioctl.h> #include "amdgpu_xgmi.h" -/* BO flag to indicate a KFD userptr BO */ -#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) - /* Userptr restore delay, just long enough to allow consecutive VM * changes to accumulate */ @@ -50,12 +47,6 @@ static struct { spinlock_t mem_limit_lock; } kfd_mem_limit; -/* Struct used for amdgpu_amdkfd_bo_validate */ -struct amdgpu_vm_parser { - uint32_t domain; - bool wait; -}; - static const char * const domain_bit_to_string[] = { "CPU", "GTT", @@ -75,16 +66,16 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) return (struct amdgpu_device *)kgd; } -static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, +static bool kfd_mem_is_attached(struct amdgpu_vm *avm, struct kgd_mem *mem) { - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; - list_for_each_entry(entry, &mem->bo_va_list, bo_list) + list_for_each_entry(entry, &mem->attachments, list) if (entry->bo_va->base.vm == avm) - return false; + return true; - return true; + return false; } /* Set memory usage limits. 
Current, limits are @@ -108,6 +99,11 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) (kfd_mem_limit.max_ttm_mem_limit >> 20)); } +void amdgpu_amdkfd_reserve_system_mem(uint64_t size) +{ + kfd_mem_limit.system_mem_used += size; +} + /* Estimate page table size needed to represent a given memory size * * With 4KB pages, we need one 8 byte PTE for each 4KB of memory @@ -217,7 +213,7 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) u32 domain = bo->preferred_domains; bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); - if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { + if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) { domain = AMDGPU_GEM_DOMAIN_CPU; sg = false; } @@ -245,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, if (!ef) return -EINVAL; - old = dma_resv_get_list(resv); + old = dma_resv_shared_list(resv); if (!old) return 0; @@ -276,7 +272,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, write_seqcount_end(&resv->seq); /* Drop the references to the removed fences or move them to ef_list */ - for (i = j, k = 0; i < old->shared_count; ++i) { + for (i = j; i < old->shared_count; ++i) { struct dma_fence *f; f = rcu_dereference_protected(new->shared[i], @@ -346,11 +342,9 @@ validate_fail: return ret; } -static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo) +static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) { - struct amdgpu_vm_parser *p = param; - - return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait); + return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false); } /* vm_validate_pt_pd_bos - Validate page table and directory BOs @@ -362,28 +356,23 @@ static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo) */ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) { - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); - struct amdgpu_vm_parser param; int ret; - param.domain = AMDGPU_GEM_DOMAIN_VRAM; - param.wait = false; - - ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate, - ¶m); + ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL); if (ret) { pr_err("failed to validate PT BOs\n"); return ret; } - ret = amdgpu_amdkfd_validate(¶m, pd); + ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd); if (ret) { pr_err("failed to validate PD\n"); return ret; } - vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); + vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo); if (vm->use_cpu_for_update) { ret = amdgpu_bo_kmap(pd, NULL); @@ -398,7 +387,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm) static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) { - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; @@ -431,7 +420,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; else - mapping_flags |= AMDGPU_VM_MTYPE_UC; + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } else { mapping_flags |= coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; @@ -450,7 +440,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) if (adev->gmc.xgmi.connected_to_cpu) snoop = true; } else { - mapping_flags |= AMDGPU_VM_MTYPE_UC; + mapping_flags |= coherent ? 
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; if (amdgpu_xgmi_same_hive(adev, bo_adev)) snoop = true; } @@ -471,87 +462,320 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) return pte_flags; } -/* add_bo_to_vm - Add a BO to a VM +static int +kfd_mem_dmamap_userptr(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + enum dma_data_direction direction = + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + struct amdgpu_device *adev = attachment->adev; + struct ttm_tt *src_ttm = mem->bo->tbo.ttm; + struct ttm_tt *ttm = bo->tbo.ttm; + int ret; + + ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); + if (unlikely(!ttm->sg)) + return -ENOMEM; + + if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) + return -EINVAL; + + /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ + ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, + ttm->num_pages, 0, + (u64)ttm->num_pages << PAGE_SHIFT, + GFP_KERNEL); + if (unlikely(ret)) + goto free_sg; + + ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); + if (unlikely(ret)) + goto release_sg; + + drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address, + ttm->num_pages); + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unmap_sg; + + return 0; + +unmap_sg: + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); +release_sg: + pr_err("DMA map userptr failed: %d\n", ret); + sg_free_table(ttm->sg); +free_sg: + kfree(ttm->sg); + ttm->sg = NULL; + return ret; +} + +static int +kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) +{ + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + +static int +kfd_mem_dmamap_attachment(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + switch (attachment->type) { + case KFD_MEM_ATT_SHARED: + return 0; + case KFD_MEM_ATT_USERPTR: + return kfd_mem_dmamap_userptr(mem, attachment); + case KFD_MEM_ATT_DMABUF: + return kfd_mem_dmamap_dmabuf(attachment); + default: + WARN_ON_ONCE(1); + } + return -EINVAL; +} + +static void +kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + enum dma_data_direction direction = + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + struct ttm_operation_ctx ctx = {.interruptible = false}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + struct amdgpu_device *adev = attachment->adev; + struct ttm_tt *ttm = bo->tbo.ttm; + + if (unlikely(!ttm->sg)) + return; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); + sg_free_table(ttm->sg); + ttm->sg = NULL; +} + +static void +kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment) +{ + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + +static void +kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + switch (attachment->type) { + case KFD_MEM_ATT_SHARED: + break; + case KFD_MEM_ATT_USERPTR: + kfd_mem_dmaunmap_userptr(mem, attachment); + break; + case KFD_MEM_ATT_DMABUF: + kfd_mem_dmaunmap_dmabuf(attachment); + break; + default: + WARN_ON_ONCE(1); + } +} + +static int +kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_bo **bo) +{ + unsigned long bo_size = mem->bo->tbo.base.size; + struct drm_gem_object *gobj; + int ret; + + ret = amdgpu_bo_reserve(mem->bo, false); + if (ret) + return ret; + + ret = amdgpu_gem_object_create(adev, bo_size, 1, + AMDGPU_GEM_DOMAIN_CPU, + AMDGPU_GEM_CREATE_PREEMPTIBLE, + ttm_bo_type_sg, mem->bo->tbo.base.resv, + &gobj); + amdgpu_bo_unreserve(mem->bo); + if (ret) + return ret; + + *bo = gem_to_amdgpu_bo(gobj); + (*bo)->parent = amdgpu_bo_ref(mem->bo); + + return 0; +} + +static int +kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_bo **bo) +{ + struct drm_gem_object *gobj; + int ret; + + if (!mem->dmabuf) { + mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base, + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? + DRM_RDWR : 0); + if (IS_ERR(mem->dmabuf)) { + ret = PTR_ERR(mem->dmabuf); + mem->dmabuf = NULL; + return ret; + } + } + + gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf); + if (IS_ERR(gobj)) + return PTR_ERR(gobj); + + /* Import takes an extra reference on the dmabuf. Drop it now to + * avoid leaking it. We only need the one reference in + * kgd_mem->dmabuf. + */ + dma_buf_put(mem->dmabuf); + + *bo = gem_to_amdgpu_bo(gobj); + (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; + (*bo)->parent = amdgpu_bo_ref(mem->bo); + + return 0; +} + +/* kfd_mem_attach - Add a BO to a VM * * Everything that needs to bo done only once when a BO is first added * to a VM. It can later be mapped and unmapped many times without * repeating these steps. * + * 0. Create BO for DMA mapping, if needed * 1. Allocate and initialize BO VA entry data structure * 2. Add BO to the VM * 3. Determine ASIC-specific PTE flags * 4. Alloc page tables and directories if needed * 4a. 
Validate new page tables and directories */ -static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, - struct amdgpu_vm *vm, bool is_aql, - struct kfd_bo_va_list **p_bo_va_entry) +static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_vm *vm, bool is_aql) { - int ret; - struct kfd_bo_va_list *bo_va_entry; - struct amdgpu_bo *bo = mem->bo; + struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); + unsigned long bo_size = mem->bo->tbo.base.size; uint64_t va = mem->va; - struct list_head *list_bo_va = &mem->bo_va_list; - unsigned long bo_size = bo->tbo.base.size; + struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; + struct amdgpu_bo *bo[2] = {NULL, NULL}; + int i, ret; if (!va) { pr_err("Invalid VA when adding BO to VM\n"); return -EINVAL; } - if (is_aql) - va += bo_size; - - bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL); - if (!bo_va_entry) - return -ENOMEM; + for (i = 0; i <= is_aql; i++) { + attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL); + if (unlikely(!attachment[i])) { + ret = -ENOMEM; + goto unwind; + } - pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, - va + bo_size, vm); + pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, + va + bo_size, vm); - /* Add BO to VM internal data structures*/ - bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo); - if (!bo_va_entry->bo_va) { - ret = -EINVAL; - pr_err("Failed to add BO object to VM. ret == %d\n", - ret); - goto err_vmadd; - } + if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && + amdgpu_xgmi_same_hive(adev, bo_adev))) { + /* Mappings on the local GPU and VRAM mappings in the + * local hive share the original BO + */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = mem->bo; + drm_gem_object_get(&bo[i]->tbo.base); + } else if (i > 0) { + /* Multiple mappings on the same GPU share the BO */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = bo[0]; + drm_gem_object_get(&bo[i]->tbo.base); + } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { + /* Create an SG BO to DMA-map userptrs on other GPUs */ + attachment[i]->type = KFD_MEM_ATT_USERPTR; + ret = kfd_mem_attach_userptr(adev, mem, &bo[i]); + if (ret) + goto unwind; + } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT && + mem->bo->tbo.type != ttm_bo_type_sg) { + /* GTT BOs use DMA-mapping ability of dynamic-attach + * DMA bufs. TODO: The same should work for VRAM on + * large-BAR GPUs. + */ + attachment[i]->type = KFD_MEM_ATT_DMABUF; + ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); + if (ret) + goto unwind; + } else { + /* FIXME: Need to DMA-map other BO types: + * large-BAR VRAM, doorbells, MMIO remap + */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = mem->bo; + drm_gem_object_get(&bo[i]->tbo.base); + } - bo_va_entry->va = va; - bo_va_entry->pte_flags = get_pte_flags(adev, mem); - bo_va_entry->kgd_dev = (void *)adev; - list_add(&bo_va_entry->bo_list, list_bo_va); + /* Add BO to VM internal data structures */ + attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); + if (unlikely(!attachment[i]->bo_va)) { + ret = -ENOMEM; + pr_err("Failed to add BO object to VM. 
ret == %d\n", + ret); + goto unwind; + } - if (p_bo_va_entry) - *p_bo_va_entry = bo_va_entry; + attachment[i]->va = va; + attachment[i]->pte_flags = get_pte_flags(adev, mem); + attachment[i]->adev = adev; + list_add(&attachment[i]->list, &mem->attachments); - /* Allocate validate page tables if needed */ - ret = vm_validate_pt_pd_bos(vm); - if (ret) { - pr_err("validate_pt_pd_bos() failed\n"); - goto err_alloc_pts; + va += bo_size; } return 0; -err_alloc_pts: - amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va); - list_del(&bo_va_entry->bo_list); -err_vmadd: - kfree(bo_va_entry); +unwind: + for (; i >= 0; i--) { + if (!attachment[i]) + continue; + if (attachment[i]->bo_va) { + amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va); + list_del(&attachment[i]->list); + } + if (bo[i]) + drm_gem_object_put(&bo[i]->tbo.base); + kfree(attachment[i]); + } return ret; } -static void remove_bo_from_vm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, unsigned long size) +static void kfd_mem_detach(struct kfd_mem_attachment *attachment) { - pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n", - entry->va, - entry->va + size, entry); - amdgpu_vm_bo_rmv(adev, entry->bo_va); - list_del(&entry->bo_list); - kfree(entry); + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + pr_debug("\t remove VA 0x%llx in entry %p\n", + attachment->va, attachment); + amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va); + drm_gem_object_put(&bo->tbo.base); + list_del(&attachment->list); + kfree(attachment); } static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, @@ -726,7 +950,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, struct bo_vm_reservation_context *ctx) { struct amdgpu_bo *bo = mem->bo; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; unsigned int i; int ret; @@ -738,7 +962,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, INIT_LIST_HEAD(&ctx->list); INIT_LIST_HEAD(&ctx->duplicates); - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { + list_for_each_entry(entry, &mem->attachments, list) { if ((vm && vm != entry->bo_va->base.vm) || (entry->is_mapped != map_type && map_type != BO_VM_ALL)) @@ -760,7 +984,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, list_add(&ctx->kfd_bo.tv.head, &ctx->list); i = 0; - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { + list_for_each_entry(entry, &mem->attachments, list) { if ((vm && vm != entry->bo_va->base.vm) || (entry->is_mapped != map_type && map_type != BO_VM_ALL)) @@ -814,11 +1038,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, return ret; } -static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, +static void unmap_bo_from_gpuvm(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = entry->bo_va; + struct amdgpu_device *adev = entry->adev; struct amdgpu_vm *vm = bo_va->base.vm; amdgpu_vm_bo_unmap(adev, bo_va, entry->va); @@ -827,18 +1052,24 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, amdgpu_sync_fence(sync, bo_va->last_pt_update); - return 0; + kfd_mem_dmaunmap_attachment(mem, entry); } -static int update_gpuvm_pte(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, - struct amdgpu_sync *sync) +static int update_gpuvm_pte(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, + struct amdgpu_sync *sync, + bool *table_freed) { - int ret; struct amdgpu_bo_va *bo_va = entry->bo_va; + struct amdgpu_device *adev = entry->adev; + int ret; + + ret = 
kfd_mem_dmamap_attachment(mem, entry); + if (ret) + return ret; /* Update the page tables */ - ret = amdgpu_vm_bo_update(adev, bo_va, false); + ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed); if (ret) { pr_err("amdgpu_vm_bo_update failed\n"); return ret; @@ -847,14 +1078,16 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, return amdgpu_sync_fence(sync, bo_va->last_pt_update); } -static int map_bo_to_gpuvm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, struct amdgpu_sync *sync, - bool no_update_pte) +static int map_bo_to_gpuvm(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, + struct amdgpu_sync *sync, + bool no_update_pte, + bool *table_freed) { int ret; /* Set virtual address for the allocation */ - ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0, + ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, amdgpu_bo_size(entry->bo_va->base.bo), entry->pte_flags); if (ret) { @@ -866,7 +1099,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev, if (no_update_pte) return 0; - ret = update_gpuvm_pte(adev, entry, sync); + ret = update_gpuvm_pte(mem, entry, sync, table_freed); if (ret) { pr_err("update_gpuvm_pte() failed\n"); goto update_gpuvm_pte_failed; @@ -875,7 +1108,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev, return 0; update_gpuvm_pte_failed: - unmap_bo_from_gpuvm(adev, entry, sync); + unmap_bo_from_gpuvm(mem, entry, sync); return ret; } @@ -920,7 +1153,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info, list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { - struct amdgpu_bo *pd = peer_vm->root.base.bo; + struct amdgpu_bo *pd = peer_vm->root.bo; ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv, AMDGPU_SYNC_NE_OWNER, @@ -967,7 +1200,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, info->eviction_fence = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), - current->mm); + current->mm, + NULL); if (!info->eviction_fence) { pr_err("Failed to create eviction fence\n"); ret = -ENOMEM; @@ -986,7 +1220,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, vm->process_info = *process_info; /* Validate page directory and attach eviction fence */ - ret = amdgpu_bo_reserve(vm->root.base.bo, true); + ret = amdgpu_bo_reserve(vm->root.bo, true); if (ret) goto reserve_pd_fail; ret = vm_validate_pt_pd_bos(vm); @@ -994,16 +1228,16 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, pr_err("validate_pt_pd_bos() failed\n"); goto validate_pd_fail; } - ret = amdgpu_bo_sync_wait(vm->root.base.bo, + ret = amdgpu_bo_sync_wait(vm->root.bo, AMDGPU_FENCE_OWNER_KFD, false); if (ret) goto wait_pd_fail; - ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1); + ret = dma_resv_reserve_shared(vm->root.bo->tbo.base.resv, 1); if (ret) goto reserve_shared_fail; - amdgpu_bo_fence(vm->root.base.bo, + amdgpu_bo_fence(vm->root.bo, &vm->process_info->eviction_fence->base, true); - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); /* Update process info */ mutex_lock(&vm->process_info->lock); @@ -1017,7 +1251,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, reserve_shared_fail: wait_pd_fail: validate_pd_fail: - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); reserve_pd_fail: vm->process_info = NULL; if (info) { @@ -1036,15 +1270,19 @@ create_evict_fence_fail: int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, struct file *filp, u32 pasid, - 
void **vm, void **process_info, + void **process_info, struct dma_fence **ef) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct drm_file *drm_priv = filp->private_data; - struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; - struct amdgpu_vm *avm = &drv_priv->vm; + struct amdgpu_fpriv *drv_priv; + struct amdgpu_vm *avm; int ret; + ret = amdgpu_file_to_fpriv(filp, &drv_priv); + if (ret) + return ret; + avm = &drv_priv->vm; + /* Already a compute VM? */ if (avm->process_info) return -EINVAL; @@ -1059,7 +1297,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, if (ret) return ret; - *vm = (void *)avm; + amdgpu_vm_set_task_info(avm); return 0; } @@ -1068,7 +1306,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdkfd_process_info *process_info = vm->process_info; - struct amdgpu_bo *pd = vm->root.base.bo; + struct amdgpu_bo *pd = vm->root.bo; if (!process_info) return; @@ -1100,15 +1338,17 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } } -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm; - if (WARN_ON(!kgd || !vm)) + if (WARN_ON(!kgd || !drm_priv)) return; - pr_debug("Releasing process vm %p\n", vm); + avm = drm_priv_to_vm(drm_priv); + + pr_debug("Releasing process vm %p\n", avm); /* The original pasid of amdgpu vm has already been * released during making a amdgpu vm to a compute vm @@ -1119,10 +1359,10 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) amdgpu_vm_release_compute(adev, avm); } -uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) +uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) { - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; - struct amdgpu_bo *pd = avm->root.base.bo; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdgpu_bo *pd = avm->root.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); if (adev->asic_type < CHIP_VEGA10) @@ -1132,11 +1372,11 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct kgd_dev *kgd, uint64_t va, uint64_t size, - void *vm, struct kgd_mem **mem, + void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; @@ -1161,7 +1401,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { domain = AMDGPU_GEM_DOMAIN_GTT; alloc_domain = AMDGPU_GEM_DOMAIN_CPU; - alloc_flags = 0; + alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE; if (!offset || !*offset) return -EINVAL; user_addr = untagged_addr(*offset); @@ -1185,7 +1425,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ret = -ENOMEM; goto err; } - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); mutex_init(&(*mem)->lock); (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); @@ -1216,6 +1456,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( domain_string(alloc_domain), ret); goto err_bo_create; } + ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); + if (ret) { + pr_debug("Failed 
to allow vma node access. ret %d\n", ret); + goto err_node_allow; + } bo = gem_to_amdgpu_bo(gobj); if (bo_type == ttm_bo_type_sg) { bo->tbo.sg = sg; @@ -1224,7 +1469,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( bo->kfd_bo = *mem; (*mem)->bo = bo; if (user_addr) - bo->flags |= AMDGPU_AMDKFD_USERPTR_BO; + bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; (*mem)->va = va; (*mem)->domain = domain; @@ -1245,6 +1490,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( allocate_init_user_pages_failed: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); + drm_vma_node_revoke(&gobj->vma_node, drm_priv); +err_node_allow: amdgpu_bo_unref(&bo); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; @@ -1262,11 +1509,12 @@ err: } int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + uint64_t *size) { struct amdkfd_process_info *process_info = mem->process_info; unsigned long bo_size = mem->bo->tbo.base.size; - struct kfd_bo_va_list *entry, *tmp; + struct kfd_mem_attachment *entry, *tmp; struct bo_vm_reservation_context ctx; struct ttm_validate_buffer *bo_list_entry; unsigned int mapped_to_gpu_memory; @@ -1309,13 +1557,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue)); - /* Remove from VM internal data structures */ - list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list) - remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev, - entry, bo_size); - ret = unreserve_bo_and_vms(&ctx, false, false); + /* Remove from VM internal data structures */ + list_for_each_entry_safe(entry, tmp, &mem->attachments, list) + kfd_mem_detach(entry); + /* Free the sync object */ amdgpu_sync_free(&mem->sync); @@ -1339,6 +1586,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } /* Free the BO*/ + drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); + if (mem->dmabuf) + dma_buf_put(mem->dmabuf); drm_gem_object_put(&mem->bo->tbo.base); mutex_destroy(&mem->lock); kfree(mem); @@ -1347,17 +1597,16 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) + struct kgd_dev *kgd, struct kgd_mem *mem, + void *drm_priv, bool *table_freed) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); int ret; struct amdgpu_bo *bo; uint32_t domain; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; struct bo_vm_reservation_context ctx; - struct kfd_bo_va_list *bo_va_entry = NULL; - struct kfd_bo_va_list *bo_va_entry_aql = NULL; unsigned long bo_size; bool is_invalid_userptr = false; @@ -1391,9 +1640,15 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue), - vm, domain_string(domain)); + avm, domain_string(domain)); - ret = reserve_bo_and_vm(mem, vm, &ctx); + if (!kfd_mem_is_attached(avm, mem)) { + ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); + if (ret) + goto out; + } + + ret = reserve_bo_and_vm(mem, avm, &ctx); if (unlikely(ret)) goto out; @@ -1403,25 +1658,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( * the next restore worker */ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && - bo->tbo.mem.mem_type == TTM_PL_SYSTEM) + bo->tbo.resource->mem_type == TTM_PL_SYSTEM) is_invalid_userptr = true; - if 
(check_if_add_bo_to_vm(avm, mem)) { - ret = add_bo_to_vm(adev, mem, avm, false, - &bo_va_entry); - if (ret) - goto add_bo_to_vm_failed; - if (mem->aql_queue) { - ret = add_bo_to_vm(adev, mem, avm, - true, &bo_va_entry_aql); - if (ret) - goto add_bo_to_vm_failed_aql; - } - } else { - ret = vm_validate_pt_pd_bos(avm); - if (unlikely(ret)) - goto add_bo_to_vm_failed; - } + ret = vm_validate_pt_pd_bos(avm); + if (unlikely(ret)) + goto out_unreserve; if (mem->mapped_to_gpu_memory == 0 && !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { @@ -1432,34 +1674,34 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( ret = amdgpu_amdkfd_bo_validate(bo, domain, true); if (ret) { pr_debug("Validate failed\n"); - goto map_bo_to_gpuvm_failed; + goto out_unreserve; } } - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { - if (entry->bo_va->base.vm == vm && !entry->is_mapped) { - pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", - entry->va, entry->va + bo_size, - entry); + list_for_each_entry(entry, &mem->attachments, list) { + if (entry->bo_va->base.vm != avm || entry->is_mapped) + continue; - ret = map_bo_to_gpuvm(adev, entry, ctx.sync, - is_invalid_userptr); - if (ret) { - pr_err("Failed to map bo to gpuvm\n"); - goto map_bo_to_gpuvm_failed; - } + pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", + entry->va, entry->va + bo_size, entry); - ret = vm_update_pds(vm, ctx.sync); - if (ret) { - pr_err("Failed to update page directories\n"); - goto map_bo_to_gpuvm_failed; - } + ret = map_bo_to_gpuvm(mem, entry, ctx.sync, + is_invalid_userptr, table_freed); + if (ret) { + pr_err("Failed to map bo to gpuvm\n"); + goto out_unreserve; + } - entry->is_mapped = true; - mem->mapped_to_gpu_memory++; - pr_debug("\t INC mapping count %d\n", - mem->mapped_to_gpu_memory); + ret = vm_update_pds(avm, ctx.sync); + if (ret) { + pr_err("Failed to update page directories\n"); + goto out_unreserve; } + + entry->is_mapped = true; + mem->mapped_to_gpu_memory++; + pr_debug("\t INC mapping count %d\n", + mem->mapped_to_gpu_memory); } if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count) @@ -1470,13 +1712,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( goto out; -map_bo_to_gpuvm_failed: - if (bo_va_entry_aql) - remove_bo_from_vm(adev, bo_va_entry_aql, bo_size); -add_bo_to_vm_failed_aql: - if (bo_va_entry) - remove_bo_from_vm(adev, bo_va_entry, bo_size); -add_bo_to_vm_failed: +out_unreserve: unreserve_bo_and_vms(&ctx, false, false); out: mutex_unlock(&mem->process_info->lock); @@ -1485,19 +1721,18 @@ out: } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdkfd_process_info *process_info = - ((struct amdgpu_vm *)vm)->process_info; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdkfd_process_info *process_info = avm->process_info; unsigned long bo_size = mem->bo->tbo.base.size; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; struct bo_vm_reservation_context ctx; int ret; mutex_lock(&mem->lock); - ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx); + ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); if (unlikely(ret)) goto out; /* If no VMs were reserved, it means the BO wasn't actually mapped */ @@ -1506,35 +1741,28 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( goto unreserve_out; } - ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm); + ret = vm_validate_pt_pd_bos(avm); if (unlikely(ret)) goto 
unreserve_out; pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue), - vm); - - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { - if (entry->bo_va->base.vm == vm && entry->is_mapped) { - pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", - entry->va, - entry->va + bo_size, - entry); - - ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync); - if (ret == 0) { - entry->is_mapped = false; - } else { - pr_err("failed to unmap VA 0x%llx\n", - mem->va); - goto unreserve_out; - } + avm); - mem->mapped_to_gpu_memory--; - pr_debug("\t DEC mapping count %d\n", - mem->mapped_to_gpu_memory); - } + list_for_each_entry(entry, &mem->attachments, list) { + if (entry->bo_va->base.vm != avm || !entry->is_mapped) + continue; + + pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", + entry->va, entry->va + bo_size, entry); + + unmap_bo_from_gpuvm(mem, entry, ctx.sync); + entry->is_mapped = false; + + mem->mapped_to_gpu_memory--; + pr_debug("\t DEC mapping count %d\n", + mem->mapped_to_gpu_memory); } /* If BO is unmapped from all VMs, unfence it. It can be evicted if @@ -1642,14 +1870,15 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, struct dma_buf *dma_buf, - uint64_t va, void *vm, + uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct drm_gem_object *obj; struct amdgpu_bo *bo; - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + int ret; if (dma_buf->ops != &amdgpu_dmabuf_ops) /* Can't handle non-graphics buffers */ @@ -1670,13 +1899,19 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, if (!*mem) return -ENOMEM; + ret = drm_vma_node_allow(&obj->vma_node, drm_priv); + if (ret) { + kfree(mem); + return ret; + } + if (size) *size = amdgpu_bo_size(bo); if (mmap_offset) *mmap_offset = amdgpu_bo_mmap_offset(bo); - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); mutex_init(&(*mem)->lock); (*mem)->alloc_flags = @@ -1873,7 +2108,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, validate_list.head) { - struct kfd_bo_va_list *bo_va_entry; + struct kfd_mem_attachment *attachment; bo = mem->bo; @@ -1896,13 +2131,12 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) * VM faults if the GPU tries to access the invalid * memory. */ - list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) { - if (!bo_va_entry->is_mapped) + list_for_each_entry(attachment, &mem->attachments, list) { + if (!attachment->is_mapped) continue; - ret = update_gpuvm_pte((struct amdgpu_device *) - bo_va_entry->kgd_dev, - bo_va_entry, &sync); + kfd_mem_dmaunmap_attachment(mem, attachment); + ret = update_gpuvm_pte(mem, attachment, &sync, NULL); if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ @@ -2083,7 +2317,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; - struct kfd_bo_va_list *bo_va_entry; + struct kfd_mem_attachment *attachment; total_size += amdgpu_bo_size(bo); @@ -2103,12 +2337,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) pr_debug("Memory eviction: Sync BO fence failed. 
Try again\n"); goto validate_map_fail; } - list_for_each_entry(bo_va_entry, &mem->bo_va_list, - bo_list) { - ret = update_gpuvm_pte((struct amdgpu_device *) - bo_va_entry->kgd_dev, - bo_va_entry, - &sync_obj); + list_for_each_entry(attachment, &mem->attachments, list) { + if (!attachment->is_mapped) + continue; + + kfd_mem_dmaunmap_attachment(mem, attachment); + ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL); if (ret) { pr_debug("Memory eviction: update PTE failed. Try again\n"); goto validate_map_fail; @@ -2135,7 +2369,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) */ new_fence = amdgpu_amdkfd_fence_create( process_info->eviction_fence->base.context, - process_info->eviction_fence->mm); + process_info->eviction_fence->mm, + NULL); if (!new_fence) { pr_err("Failed to create eviction fence\n"); ret = -ENOMEM; @@ -2154,7 +2389,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) /* Attach eviction fence to PD / PT BOs */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { - struct amdgpu_bo *bo = peer_vm->root.base.bo; + struct amdgpu_bo *bo = peer_vm->root.bo; amdgpu_bo_fence(bo, &process_info->eviction_fence->base, true); } @@ -2182,7 +2417,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem return -ENOMEM; mutex_init(&(*mem)->lock); - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); (*mem)->bo = amdgpu_bo_ref(gws_bo); (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; (*mem)->process_info = process_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 494b2e1717d5..96b7bb13a2dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1768,6 +1768,15 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev, static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version, NULL); +static struct attribute *amdgpu_vbios_version_attrs[] = { + &dev_attr_vbios_version.attr, + NULL +}; + +const struct attribute_group amdgpu_vbios_version_attr_group = { + .attrs = amdgpu_vbios_version_attrs +}; + /** * amdgpu_atombios_fini - free the driver info and callbacks for atombios * @@ -1787,7 +1796,6 @@ void amdgpu_atombios_fini(struct amdgpu_device *adev) adev->mode_info.atom_context = NULL; kfree(adev->mode_info.atom_card_info); adev->mode_info.atom_card_info = NULL; - device_remove_file(adev->dev, &dev_attr_vbios_version); } /** @@ -1804,7 +1812,6 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) { struct card_info *atom_card_info = kzalloc(sizeof(struct card_info), GFP_KERNEL); - int ret; if (!atom_card_info) return -ENOMEM; @@ -1828,17 +1835,14 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) if (adev->is_atom_fw) { amdgpu_atomfirmware_scratch_regs_init(adev); amdgpu_atomfirmware_allocate_fb_scratch(adev); + /* cached firmware_flags for further usage */ + adev->mode_info.firmware_flags = + amdgpu_atomfirmware_query_firmware_capability(adev); } else { amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_allocate_fb_scratch(adev); } - ret = device_create_file(adev->dev, &dev_attr_vbios_version); - if (ret) { - DRM_ERROR("Failed to create device file for VBIOS version\n"); - return ret; - } - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 60716b35444b..3b5d13189073 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -29,23 +29,59 @@ #include "atombios.h" #include "soc15_hw_ip.h" -bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev) +union firmware_info { + struct atom_firmware_info_v3_1 v31; + struct atom_firmware_info_v3_2 v32; + struct atom_firmware_info_v3_3 v33; + struct atom_firmware_info_v3_4 v34; +}; + +/* + * Helper function to query firmware capability + * + * @adev: amdgpu_device pointer + * + * Return firmware_capability in firmwareinfo table on success or 0 if not + */ +uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev) { - int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); - uint16_t data_offset; + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index; + u16 data_offset, size; + union firmware_info *firmware_info; + u8 frev, crev; + u32 fw_cap = 0; - if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, - NULL, NULL, &data_offset)) { - struct atom_firmware_info_v3_1 *firmware_info = - (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + - data_offset); + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); - if (le32_to_cpu(firmware_info->firmware_capability) & - ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) - return true; + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, + index, &size, &frev, &crev, &data_offset)) { + /* support firmware_info 3.1 + */ + if ((frev == 3 && crev >=1) || (frev > 3)) { + firmware_info = (union firmware_info *) + (mode_info->atom_context->bios + data_offset); + fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability); + } } - return false; + + return fw_cap; +} + +/* + * Helper function to query gpu virtualizaiton capability + * + * @adev: amdgpu_device pointer + * + * Return true if gpu virtualization is supported or false if not + */ +bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev) +{ + u32 fw_cap; + + fw_cap = adev->mode_info.firmware_flags; + + return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? 
true : false; } void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) @@ -400,41 +436,36 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev) return ecc_default_enabled; } -union firmware_info { - struct atom_firmware_info_v3_1 v31; - struct atom_firmware_info_v3_2 v32; - struct atom_firmware_info_v3_3 v33; - struct atom_firmware_info_v3_4 v34; -}; - /* + * Helper function to query sram ecc capablity + * + * @adev: amdgpu_device pointer + * * Return true if vbios supports sram ecc or false if not */ bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev) { - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index; - u16 data_offset, size; - union firmware_info *firmware_info; - u8 frev, crev; - bool sram_ecc_supported = false; + u32 fw_cap; - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); + fw_cap = adev->mode_info.firmware_flags; - if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, - index, &size, &frev, &crev, &data_offset)) { - /* support firmware_info 3.1 + */ - if ((frev == 3 && crev >=1) || (frev > 3)) { - firmware_info = (union firmware_info *) - (mode_info->atom_context->bios + data_offset); - sram_ecc_supported = - (le32_to_cpu(firmware_info->v31.firmware_capability) & - ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false; - } - } + return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false; +} - return sram_ecc_supported; +/* + * Helper function to query dynamic boot config capability + * + * @adev: amdgpu_device pointer + * + * Return true if vbios supports dynamic boot config or false if not + */ +bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev) +{ + u32 fw_cap; + + fw_cap = adev->mode_info.firmware_flags; + + return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false; } union smu_info { @@ -466,10 +497,6 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) adev->pm.current_sclk = adev->clock.default_sclk; adev->pm.current_mclk = adev->clock.default_mclk; - /* not technically a clock, but... 
*/ - adev->mode_info.firmware_flags = - le32_to_cpu(firmware_info->v31.firmware_capability); - ret = 0; } @@ -519,6 +546,21 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) ret = 0; } + /* if asic is Navi+, the rlc reference clock is used for system clock + * from vbios gfx_info table */ + if (adev->asic_type >= CHIP_NAVI10) { + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + gfx_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + struct atom_gfx_info_v2_2 *gfx_info = (struct atom_gfx_info_v2_2*) + (mode_info->atom_context->bios + data_offset); + if ((frev == 2) && (crev >= 2)) + spll->reference_freq = le32_to_cpu(gfx_info->rlc_gpu_timer_refclk); + ret = 0; + } + } + return ret; } @@ -584,67 +626,19 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) } /* - * Check if VBIOS supports GDDR6 training data save/restore + * Helper function to query two stage mem training capability + * + * @adev: amdgpu_device pointer + * + * Return true if two stage mem training is supported or false if not */ -static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev) +bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev) { - uint16_t data_offset; - int index; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); - if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, - NULL, NULL, &data_offset)) { - struct atom_firmware_info_v3_1 *firmware_info = - (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + - data_offset); - - DRM_DEBUG("atom firmware capability:0x%08x.\n", - le32_to_cpu(firmware_info->firmware_capability)); + u32 fw_cap; - if (le32_to_cpu(firmware_info->firmware_capability) & - ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) - return true; - } + fw_cap = adev->mode_info.firmware_flags; - return false; -} - -int amdgpu_mem_train_support(struct amdgpu_device *adev) -{ - int ret; - uint32_t major, minor, revision, hw_v; - - if (gddr6_mem_train_vbios_support(adev)) { - amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision); - hw_v = HW_REV(major, minor, revision); - /* - * treat 0 revision as a special case since register for MP0 and MMHUB is missing - * for some Navi10 A0, preventing driver from discovering the hwip information since - * none of the functions will be initialized, it should not cause any problems - */ - switch (hw_v) { - case HW_REV(11, 0, 0): - case HW_REV(11, 0, 5): - case HW_REV(11, 0, 7): - case HW_REV(11, 0, 11): - case HW_REV(11, 0, 12): - ret = 1; - break; - default: - DRM_ERROR("memory training vbios supports but psp hw(%08x)" - " doesn't support!\n", hw_v); - ret = -1; - break; - } - } else { - ret = 0; - hw_v = -1; - } - - - DRM_DEBUG("mp0 hw_v %08x, ret:%d.\n", hw_v, ret); - return ret; + return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? 
true : false; } int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 9f0d4356e8df..1bbbb195015d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -26,7 +26,8 @@ #define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t)) -bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); +uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev); +bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, @@ -35,7 +36,8 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev); +bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev); +bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev); -int amdgpu_mem_train_support(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b5c766998045..76fe5b71e35d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -396,10 +396,10 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, spin_unlock(&adev->mm_stats.lock); } -static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, - struct amdgpu_bo *bo) +static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_cs_parser *p = param; struct ttm_operation_ctx ctx = { .interruptible = true, .no_wait_gpu = false, @@ -451,21 +451,6 @@ retry: return r; } -static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) -{ - struct amdgpu_cs_parser *p = param; - int r; - - r = amdgpu_cs_bo_validate(p, bo); - if (r) - return r; - - if (bo->shadow) - r = amdgpu_cs_bo_validate(p, bo->shadow); - - return r; -} - static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, struct list_head *validated) { @@ -493,7 +478,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, lobj->user_pages); } - r = amdgpu_cs_validate(p, bo); + r = amdgpu_cs_bo_validate(p, bo); if (r) return r; @@ -593,7 +578,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, p->bytes_moved_vis = 0; r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, - amdgpu_cs_validate, p); + amdgpu_cs_bo_validate, p); if (r) { DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n"); goto error_validate; @@ -672,12 +657,12 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) } /** - * cs_parser_fini() - clean parser states + * amdgpu_cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. 
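/*
 * Editor's note (illustrative sketch, not part of the patch): the amdgpu_cs.c
 * hunks above give amdgpu_cs_bo_validate() the (void *param, struct amdgpu_bo *)
 * signature that amdgpu_vm_validate_pt_bos() expects for its callback, so the
 * old amdgpu_cs_validate() wrapper (which also validated bo->shadow) can be
 * dropped. The shape of that callback pattern, with hypothetical my_* names:
 */
struct my_validate_ctx {
	u64 bytes_moved;			/* accumulated by the callback */
};

static int my_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct my_validate_ctx *vctx = param;	/* cast back from void * */
	struct ttm_operation_ctx tctx = { .interruptible = true };
	int r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
	if (!r)
		vctx->bytes_moved += amdgpu_bo_size(bo);
	return r;
}
/*
 * The same function can then be passed both to the CS BO-list walk and to
 * amdgpu_vm_validate_pt_bos(adev, vm, my_bo_validate, &vctx), which is what
 * the patch now does with amdgpu_cs_bo_validate() and the parser pointer.
 */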
* @error: error number * @backoff: indicator to backoff the reservation * - * If error is set than unvalidate buffer, otherwise just free memory + * If error is set then unvalidate buffer, otherwise just free memory * used by parsing context. **/ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, @@ -796,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); + r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL); if (r) return r; @@ -807,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { bo_va = fpriv->csa_va; BUG_ON(!bo_va); - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; @@ -826,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; @@ -847,7 +832,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo); + p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo); if (amdgpu_vm_debug) { /* Invalidate all BOs to test for userspace bugs */ @@ -1488,7 +1473,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, } /** - * amdgpu_cs_wait_all_fence - wait on all fences to signal + * amdgpu_cs_wait_all_fences - wait on all fences to signal * * @adev: amdgpu device * @filp: file private @@ -1639,7 +1624,7 @@ err_free_fences: } /** - * amdgpu_cs_find_bo_va - find bo_va for VM address + * amdgpu_cs_find_mapping - find bo_va for VM address * * @parser: command submission parser context * @addr: VM address diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6819fe5612d9..e7a010b7ca1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -331,10 +331,13 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev, return 0; } +#define AMDGPU_RAS_COUNTE_DELAY_MS 3000 + static int amdgpu_ctx_query2(struct amdgpu_device *adev, - struct amdgpu_fpriv *fpriv, uint32_t id, - union drm_amdgpu_ctx_out *out) + struct amdgpu_fpriv *fpriv, uint32_t id, + union drm_amdgpu_ctx_out *out) { + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ctx *ctx; struct amdgpu_ctx_mgr *mgr; @@ -361,6 +364,30 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, if (atomic_read(&ctx->guilty)) out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY; + if (adev->ras_enabled && con) { + /* Return the cached values in O(1), + * and schedule delayed work to cache + * new vaues. 
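/*
 * Editor's note (illustrative sketch, not part of the patch): the amdgpu_ctx.c
 * hunk above makes AMDGPU_CTX_OP_QUERY_STATE2 O(1) by returning cached RAS
 * error counts and only scheduling delayed work to refresh them, instead of
 * querying hardware from the ioctl. The names below (my_ras_ctx,
 * my_query_ce_count_hw, MY_CTX_FLAG_NEW_CE) are hypothetical stand-ins for
 * the pattern, not the driver's real symbols.
 */
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define MY_CTX_FLAG_NEW_CE	0x1

struct my_ras_ctx {
	struct delayed_work count_work;	/* bound to my_ras_count_refresh() at init */
	atomic_t ce_count;		/* cache published by the worker */
};

struct my_ctx {
	int ce_snapshot;		/* last value reported to this context */
};

int my_query_ce_count_hw(struct my_ras_ctx *con);	/* slow path, hypothetical */

static void my_ras_count_refresh(struct work_struct *work)
{
	struct my_ras_ctx *con = container_of(to_delayed_work(work),
					      struct my_ras_ctx, count_work);

	/* runs outside the ioctl; publish the freshly queried value */
	atomic_set(&con->ce_count, my_query_ce_count_hw(con));
}

static u32 my_ctx_query_flags(struct my_ras_ctx *con, struct my_ctx *ctx)
{
	int ce = atomic_read(&con->ce_count);	/* cached, O(1) */
	u32 flags = 0;

	if (ce != ctx->ce_snapshot) {		/* counter moved since last query */
		ctx->ce_snapshot = ce;
		flags |= MY_CTX_FLAG_NEW_CE;
	}
	/* refresh the cache for the next query, as with AMDGPU_RAS_COUNTE_DELAY_MS */
	schedule_delayed_work(&con->count_work, msecs_to_jiffies(3000));
	return flags;
}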
+ */ + int ce_count, ue_count; + + ce_count = atomic_read(&con->ras_ce_count); + ue_count = atomic_read(&con->ras_ue_count); + + if (ce_count != ctx->ras_counter_ce) { + ctx->ras_counter_ce = ce_count; + out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE; + } + + if (ue_count != ctx->ras_counter_ue) { + ctx->ras_counter_ue = ue_count; + out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE; + } + + schedule_delayed_work(&con->ras_counte_delay_work, + msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS)); + } + mutex_unlock(&mgr->lock); return 0; } @@ -635,3 +662,81 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) idr_destroy(&mgr->ctx_handles); mutex_destroy(&mgr->lock); } + +static void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx, + struct amdgpu_ctx_entity *centity, ktime_t *total, ktime_t *max) +{ + ktime_t now, t1; + uint32_t i; + + *total = *max = 0; + + now = ktime_get(); + for (i = 0; i < amdgpu_sched_jobs; i++) { + struct dma_fence *fence; + struct drm_sched_fence *s_fence; + + spin_lock(&ctx->ring_lock); + fence = dma_fence_get(centity->fences[i]); + spin_unlock(&ctx->ring_lock); + if (!fence) + continue; + s_fence = to_drm_sched_fence(fence); + if (!dma_fence_is_signaled(&s_fence->scheduled)) { + dma_fence_put(fence); + continue; + } + t1 = s_fence->scheduled.timestamp; + if (!ktime_before(t1, now)) { + dma_fence_put(fence); + continue; + } + if (dma_fence_is_signaled(&s_fence->finished) && + s_fence->finished.timestamp < now) + *total += ktime_sub(s_fence->finished.timestamp, t1); + else + *total += ktime_sub(now, t1); + t1 = ktime_sub(now, t1); + dma_fence_put(fence); + *max = max(t1, *max); + } +} + +ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip, + uint32_t idx, uint64_t *elapsed) +{ + struct idr *idp; + struct amdgpu_ctx *ctx; + uint32_t id; + struct amdgpu_ctx_entity *centity; + ktime_t total = 0, max = 0; + + if (idx >= AMDGPU_MAX_ENTITY_NUM) + return 0; + idp = &mgr->ctx_handles; + mutex_lock(&mgr->lock); + idr_for_each_entry(idp, ctx, id) { + ktime_t ttotal, tmax; + + if (!ctx->entities[hwip][idx]) + continue; + + centity = ctx->entities[hwip][idx]; + amdgpu_ctx_fence_time(ctx, centity, &ttotal, &tmax); + + /* Harmonic mean approximation diverges for very small + * values. If ratio < 0.01% ignore + */ + if (AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(tmax, ttotal)) + continue; + + total = ktime_add(total, ttotal); + max = ktime_after(tmax, max) ? 
tmax : max; + } + + mutex_unlock(&mgr->lock); + if (elapsed) + *elapsed = max; + + return total; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index f54e10314661..14db16bc3322 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -30,6 +30,7 @@ struct drm_file; struct amdgpu_fpriv; #define AMDGPU_MAX_ENTITY_NUM 4 +#define AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(max, total) ((max) > 16384ULL*(total)) struct amdgpu_ctx_entity { uint64_t sequence; @@ -87,5 +88,6 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout); void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); - +ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip, + uint32_t idx, uint64_t *elapsed); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index bcaf271b39bf..536005bff24a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -990,7 +990,7 @@ err: } /** - * amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF + * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF * * @f: open file handle * @buf: User buffer to write data from @@ -1041,7 +1041,7 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu /** - * amdgpu_debugfs_regs_gfxoff_status - read gfxoff status + * amdgpu_debugfs_gfxoff_read - read gfxoff status * * @f: open file handle * @buf: User buffer to store read data in @@ -1304,11 +1304,11 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused) seq_printf(m, "pid:%d\tProcess:%s ----------\n", vm->task_info.pid, vm->task_info.process_name); - r = amdgpu_bo_reserve(vm->root.base.bo, true); + r = amdgpu_bo_reserve(vm->root.bo, true); if (r) break; amdgpu_debugfs_vm_bo_info(vm, m); - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); } mutex_unlock(&dev->filelist_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 57ec108b5972..130a9adf09ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -71,6 +71,8 @@ #include <drm/task_barrier.h> #include <linux/pm_runtime.h> +#include <drm/drm_drv.h> + MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -82,6 +84,7 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -119,6 +122,8 @@ const char *amdgpu_asic_name[] = { "NAVY_FLOUNDER", "VANGOGH", "DIMGREY_CAVEFISH", + "BEIGE_GOBY", + "YELLOW_CARP", "LAST", }; @@ -262,6 +267,21 @@ bool amdgpu_device_supports_baco(struct drm_device *dev) return amdgpu_asic_supports_baco(adev); } +/** + * amdgpu_device_supports_smart_shift - Is the device dGPU with + * smart shift support + * + * @dev: drm_device pointer + * + * Returns true if the device is a dGPU with Smart Shift support, + * otherwise returns false. 
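/*
 * Editor's note (illustrative, not part of the patch): in the amdgpu_ctx.c hunk
 * above, amdgpu_ctx_fence_time() charges each scheduler fence the span from its
 * "scheduled" timestamp to its "finished" timestamp (or to "now" if it is still
 * running), sums that into *total, and keeps the largest now-minus-scheduled
 * span in *max as the observation window. AMDGPU_CTX_FENCE_USAGE_MIN_RATIO(max,
 * total), i.e. (max > 16384 * total), then drops entities whose busy/window
 * ratio is below about 1/16384 (~0.01%, the "harmonic mean" caveat); for
 * example, 0.5 ms of busy time over a 10 s window is 0.005% and is ignored.
 * A hypothetical helper expressing the same test:
 */
static bool my_usage_too_small(ktime_t busy, ktime_t window)
{
	/* mirrors (max) > 16384ULL * (total): true when busy/window < 1/16384 */
	return ktime_to_ns(window) > 16384LL * ktime_to_ns(busy);
}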
+ */ +bool amdgpu_device_supports_smart_shift(struct drm_device *dev) +{ + return (amdgpu_device_supports_boco(dev) && + amdgpu_acpi_is_power_shift_control_supported()); +} + /* * VRAM access helper functions */ @@ -281,7 +301,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, unsigned long flags; uint32_t hi = ~0; uint64_t last; + int idx; + if (!drm_dev_enter(&adev->ddev, &idx)) + return; #ifdef CONFIG_64BIT last = min(pos + size, adev->gmc.visible_vram_size); @@ -292,15 +315,15 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, if (write) { memcpy_toio(addr, buf, count); mb(); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); } else { - amdgpu_asic_invalidate_hdp(adev, NULL); + amdgpu_device_invalidate_hdp(adev, NULL); mb(); memcpy_fromio(buf, addr, count); } if (count == size) - return; + goto exit; pos += count; buf += count / 4; @@ -323,6 +346,11 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, *buf++ = RREG32_NO_KIQ(mmMM_DATA); } spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + +#ifdef CONFIG_64BIT +exit: +#endif + drm_dev_exit(idx); } /* @@ -332,7 +360,7 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, /* Check if hw access should be skipped because of hotplug or device error */ bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev) { - if (adev->in_pci_err_recovery) + if (adev->no_hw_access) return true; #ifdef CONFIG_LOCKDEP @@ -490,7 +518,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->is_rlcg_access_range) { if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) - return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0); + return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0); } else { writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); } @@ -1820,6 +1848,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: default: return 0; case CHIP_VEGA10: @@ -1857,6 +1886,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_VANGOGH: chip_name = "vangogh"; break; + case CHIP_YELLOW_CARP: + chip_name = "yellow_carp"; + break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); @@ -2033,9 +2065,13 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: if (adev->asic_type == CHIP_VANGOGH) adev->family = AMDGPU_FAMILY_VGH; + else if (adev->asic_type == CHIP_YELLOW_CARP) + adev->family = AMDGPU_FAMILY_YC; else adev->family = AMDGPU_FAMILY_NV; @@ -2571,34 +2607,26 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) return 0; } -/** - * amdgpu_device_ip_fini - run fini for hardware IPs - * - * @adev: amdgpu_device pointer - * - * Main teardown pass for hardware IPs. The list of all the hardware - * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks - * are run. hw_fini tears down the hardware associated with each IP - * and sw_fini tears down any software state associated with each IP. - * Returns 0 on success, negative error code on failure. 
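/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * amdgpu_device_vram_access() hunk above wraps its MMIO path in
 * drm_dev_enter()/drm_dev_exit(), so the access is skipped once the device has
 * been unplugged and its BARs torn down (see amdgpu_device_unmap_mmio() added
 * later in this diff). The guard pattern, with a hypothetical helper and
 * assuming <drm/drm_drv.h> and <linux/io.h>:
 */
static u32 my_read_mmio_word(struct drm_device *ddev, void __iomem *addr)
{
	u32 val = 0;
	int idx;

	if (!drm_dev_enter(ddev, &idx))		/* device already gone, bail out */
		return 0;

	val = readl(addr);	/* mapping stays valid until drm_dev_exit() */

	drm_dev_exit(idx);
	return val;
}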
- */ -static int amdgpu_device_ip_fini(struct amdgpu_device *adev) +static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) { int i, r; - if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) - amdgpu_virt_release_ras_err_handler_data(adev); + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].version->funcs->early_fini) + continue; - amdgpu_ras_pre_fini(adev); + r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev); + if (r) { + DRM_DEBUG("early_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + } + } - if (adev->gmc.xgmi.num_physical_nodes > 1) - amdgpu_xgmi_remove_device(adev); + amdgpu_amdkfd_suspend(adev, false); amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); - amdgpu_amdkfd_device_fini(adev); - /* need to disable SMC first */ for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.hw) @@ -2629,6 +2657,33 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } + return 0; +} + +/** + * amdgpu_device_ip_fini - run fini for hardware IPs + * + * @adev: amdgpu_device pointer + * + * Main teardown pass for hardware IPs. The list of all the hardware + * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks + * are run. hw_fini tears down the hardware associated with each IP + * and sw_fini tears down any software state associated with each IP. + * Returns 0 on success, negative error code on failure. + */ +static int amdgpu_device_ip_fini(struct amdgpu_device *adev) +{ + int i, r; + + if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done) + amdgpu_virt_release_ras_err_handler_data(adev); + + amdgpu_ras_pre_fini(adev); + + if (adev->gmc.xgmi.num_physical_nodes > 1) + amdgpu_xgmi_remove_device(adev); + + amdgpu_amdkfd_device_fini_sw(adev); for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.sw) @@ -2856,7 +2911,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_IH, }; - for (i = 0; i < ARRAY_SIZE(ip_order); i++) { + for (i = 0; i < adev->num_ip_blocks; i++) { int j; struct amdgpu_ip_block *block; @@ -3034,7 +3089,7 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { if (amdgpu_sriov_vf(adev)) { if (adev->is_atom_fw) { - if (amdgpu_atomfirmware_gpu_supports_virtualization(adev)) + if (amdgpu_atomfirmware_gpu_virtualization_supported(adev)) adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; } else { if (amdgpu_atombios_has_gpu_virtualization_table(adev)) @@ -3097,7 +3152,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: #endif return amdgpu_dc != 0; #endif @@ -3181,8 +3238,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) int ret = 0; /* - * By default timeout for non compute jobs is 10000. - * And there is no timeout enforced on compute jobs. + * By default timeout for non compute jobs is 10000 + * and 60000 for compute jobs. * In SR-IOV or passthrough mode, timeout for compute * jobs are 60000 by default. */ @@ -3191,10 +3248,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ? 
msecs_to_jiffies(60000) : msecs_to_jiffies(10000); - else if (amdgpu_passthrough(adev)) - adev->compute_timeout = msecs_to_jiffies(60000); else - adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; + adev->compute_timeout = msecs_to_jiffies(60000); if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { while ((timeout_setting = strsep(&input, ",")) && @@ -3251,7 +3306,6 @@ static const struct attribute *amdgpu_dev_attributes[] = { NULL }; - /** * amdgpu_device_init - initialize the driver * @@ -3653,6 +3707,27 @@ failed_unmap: return r; } +static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev) +{ + /* Clear all CPU mappings pointing to this device */ + unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1); + + /* Unmap all mapped bars - Doorbell, registers and VRAM */ + amdgpu_device_doorbell_fini(adev); + + iounmap(adev->rmmio); + adev->rmmio = NULL; + if (adev->mman.aper_base_kaddr) + iounmap(adev->mman.aper_base_kaddr); + adev->mman.aper_base_kaddr = NULL; + + /* Memory manager related */ + if (!adev->gmc.xgmi.connected_to_cpu) { + arch_phys_wc_del(adev->gmc.vram_mtrr); + arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); + } +} + /** * amdgpu_device_fini - tear down the driver * @@ -3661,15 +3736,13 @@ failed_unmap: * Tear down the driver info (all asics). * Called at driver shutdown. */ -void amdgpu_device_fini(struct amdgpu_device *adev) +void amdgpu_device_fini_hw(struct amdgpu_device *adev) { dev_info(adev->dev, "amdgpu: finishing device.\n"); flush_delayed_work(&adev->delayed_init_work); ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); adev->shutdown = true; - kfree(adev->pci_state); - /* make sure IB test finished before entering exclusive mode * to avoid preemption on IB test * */ @@ -3686,11 +3759,29 @@ void amdgpu_device_fini(struct amdgpu_device *adev) else drm_atomic_helper_shutdown(adev_to_drm(adev)); } - amdgpu_fence_driver_fini(adev); + amdgpu_fence_driver_fini_hw(adev); + if (adev->pm_sysfs_en) amdgpu_pm_sysfs_fini(adev); + if (adev->ucode_sysfs_en) + amdgpu_ucode_sysfs_fini(adev); + sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); + amdgpu_fbdev_fini(adev); + + amdgpu_irq_fini_hw(adev); + + amdgpu_device_ip_fini_early(adev); + + amdgpu_gart_dummy_page_fini(adev); + + amdgpu_device_unmap_mmio(adev); +} + +void amdgpu_device_fini_sw(struct amdgpu_device *adev) +{ amdgpu_device_ip_fini(adev); + amdgpu_fence_driver_fini_sw(adev); release_firmware(adev->firmware.gpu_info_fw); adev->firmware.gpu_info_fw = NULL; adev->accel_working = false; @@ -3712,18 +3803,14 @@ void amdgpu_device_fini(struct amdgpu_device *adev) } if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) vga_client_register(adev->pdev, NULL, NULL, NULL); - iounmap(adev->rmmio); - adev->rmmio = NULL; - amdgpu_device_doorbell_fini(adev); - - if (adev->ucode_sysfs_en) - amdgpu_ucode_sysfs_fini(adev); - sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); if (IS_ENABLED(CONFIG_PERF_EVENTS)) amdgpu_pmu_fini(adev); if (adev->mman.discovery_bin) amdgpu_discovery_fini(adev); + + kfree(adev->pci_state); + } @@ -3743,12 +3830,15 @@ void amdgpu_device_fini(struct amdgpu_device *adev) int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) { struct amdgpu_device *adev = drm_to_adev(dev); - int r; if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; adev->in_suspend = true; + + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) + DRM_WARN("smart shift update failed\n"); + drm_kms_helper_poll_disable(dev); if (fbcon) @@ -3758,7 +3848,7 @@ int 
amdgpu_device_suspend(struct drm_device *dev, bool fbcon) amdgpu_ras_suspend(adev); - r = amdgpu_device_ip_suspend_phase1(adev); + amdgpu_device_ip_suspend_phase1(adev); if (!adev->in_s0ix) amdgpu_amdkfd_suspend(adev, adev->in_runpm); @@ -3768,7 +3858,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) amdgpu_fence_driver_suspend(adev); - r = amdgpu_device_ip_suspend_phase2(adev); + amdgpu_device_ip_suspend_phase2(adev); /* evict remaining vram memory * This second call to evict vram is to evict the gart page table * using the CPU. @@ -3858,6 +3948,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) #endif adev->in_suspend = false; + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0)) + DRM_WARN("smart shift update failed\n"); + return 0; } @@ -4031,6 +4124,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) { struct dma_fence *fence = NULL, *next = NULL; struct amdgpu_bo *shadow; + struct amdgpu_bo_vm *vmbo; long r = 1, tmo; if (amdgpu_sriov_runtime(adev)) @@ -4040,12 +4134,12 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev) dev_info(adev->dev, "recover vram bo from shadow start\n"); mutex_lock(&adev->shadow_list_lock); - list_for_each_entry(shadow, &adev->shadow_list, shadow_list) { - + list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) { + shadow = &vmbo->bo; /* No need to recover an evicted BO */ - if (shadow->tbo.mem.mem_type != TTM_PL_TT || - shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET || - shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM) + if (shadow->tbo.resource->mem_type != TTM_PL_TT || + shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET || + shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM) continue; r = amdgpu_bo_restore_shadow(shadow, &next); @@ -4634,7 +4728,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev) return 0; } -void amdgpu_device_recheck_guilty_jobs( +static void amdgpu_device_recheck_guilty_jobs( struct amdgpu_device *adev, struct list_head *device_list_handle, struct amdgpu_reset_context *reset_context) { @@ -4937,6 +5031,8 @@ skip_hw_reset: amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); + if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0)) + DRM_WARN("smart shift update failed\n"); } } @@ -5125,7 +5221,8 @@ int amdgpu_device_baco_enter(struct drm_device *dev) if (!amdgpu_device_supports_baco(adev_to_drm(adev))) return -ENOTSUPP; - if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt) + if (ras && adev->ras_enabled && + adev->nbio.funcs->enable_doorbell_interrupt) adev->nbio.funcs->enable_doorbell_interrupt(adev, false); return amdgpu_dpm_baco_enter(adev); @@ -5144,7 +5241,8 @@ int amdgpu_device_baco_exit(struct drm_device *dev) if (ret) return ret; - if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt) + if (ras && adev->ras_enabled && + adev->nbio.funcs->enable_doorbell_interrupt) adev->nbio.funcs->enable_doorbell_interrupt(adev, true); return 0; @@ -5290,9 +5388,9 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - adev->in_pci_err_recovery = true; + adev->no_hw_access = true; r = amdgpu_device_pre_asic_reset(adev, &reset_context); - adev->in_pci_err_recovery = false; + adev->no_hw_access = false; if (r) goto out; @@ 
-5387,4 +5485,31 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev) return true; } +void amdgpu_device_flush_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ +#ifdef CONFIG_X86_64 + if (adev->flags & AMD_IS_APU) + return; +#endif + if (adev->gmc.xgmi.connected_to_cpu) + return; + if (ring && ring->funcs->emit_hdp_flush) + amdgpu_ring_emit_hdp_flush(ring); + else + amdgpu_asic_flush_hdp(adev, ring); +} + +void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ +#ifdef CONFIG_X86_64 + if (adev->flags & AMD_IS_APU) + return; +#endif + if (adev->gmc.xgmi.connected_to_cpu) + return; + + amdgpu_asic_invalidate_hdp(adev, ring); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index e1b6f5891759..43e7b61d1c5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -325,7 +325,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) return 0; } -int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, +int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, int *major, int *minor, int *revision) { struct binary_header *bhdr; @@ -357,7 +357,7 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, for (j = 0; j < num_ips; j++) { ip = (struct ip *)(adev->mman.discovery_bin + ip_offset); - if (le16_to_cpu(ip->hw_id) == hw_id) { + if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) { if (major) *major = ip->major; if (minor) @@ -373,6 +373,14 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, return -EINVAL; } + +int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, + int *major, int *minor, int *revision) +{ + return amdgpu_discovery_get_ip_version(adev, VCN_HWID, + vcn_instance, major, minor, revision); +} + void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) { struct binary_header *bhdr; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 1b1ae21b1037..48e6b88cfdfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -30,8 +30,11 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev); -int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, +int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance, int *major, int *minor, int *revision); + +int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance, + int *major, int *minor, int *revision); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 2a4cd7d377bf..8e5a7ac8c36f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto unpin; } - r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl, - &work->shared_count, - &work->shared); + r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl, + &work->shared_count, &work->shared); if (unlikely(r != 0)) { DRM_ERROR("failed 
to get fences for buffer\n"); goto unpin; @@ -1075,12 +1074,9 @@ int amdgpu_display_gem_fb_verify_and_init( /* Verify that the modifier is supported. */ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format, mode_cmd->modifier[0])) { - struct drm_format_name_buf format_name; drm_dbg_kms(dev, - "unsupported pixel format %s / modifier 0x%llx\n", - drm_get_format_name(mode_cmd->pixel_format, - &format_name), - mode_cmd->modifier[0]); + "unsupported pixel format %p4cc / modifier 0x%llx\n", + &mode_cmd->pixel_format, mode_cmd->modifier[0]); ret = -EINVAL; goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 37ec59365080..a9475b207510 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,52 +42,6 @@ #include <linux/pci-p2pdma.h> #include <linux/pm_runtime.h> -/** - * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation - * @obj: GEM BO - * @vma: Virtual memory area - * - * Sets up a userspace mapping of the BO's memory in the given - * virtual memory area. - * - * Returns: - * 0 on success or a negative error code on failure. - */ -int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - unsigned asize = amdgpu_bo_size(bo); - int ret; - - if (!vma->vm_file) - return -ENODEV; - - if (adev == NULL) - return -ENODEV; - - /* Check for valid size. */ - if (asize < vma->vm_end - vma->vm_start) - return -EINVAL; - - if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || - (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { - return -EPERM; - } - vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT; - - /* prime mmap does not need to check access, so allow here */ - ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data); - if (ret) - return ret; - - ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev); - drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data); - - return ret; -} - static int __dma_resv_make_exclusive(struct dma_resv *obj) { @@ -95,10 +49,10 @@ __dma_resv_make_exclusive(struct dma_resv *obj) unsigned int count; int r; - if (!dma_resv_get_list(obj)) /* no shared fences to convert */ + if (!dma_resv_shared_list(obj)) /* no shared fences to convert */ return 0; - r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences); + r = dma_resv_get_fences(obj, NULL, &count, &fences); if (r) return r; @@ -284,12 +238,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, if (r) return ERR_PTR(r); - } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) & + } else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) & AMDGPU_GEM_DOMAIN_GTT)) { return ERR_PTR(-EBUSY); } - switch (bo->tbo.mem.mem_type) { + switch (bo->tbo.resource->mem_type) { case TTM_PL_TT: sgt = drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, @@ -303,8 +257,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, break; case TTM_PL_VRAM: - r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0, - bo->tbo.base.size, attach->dev, dir, &sgt); + r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0, + bo->tbo.base.size, attach->dev, + dir, &sgt); if (r) return ERR_PTR(r); break; @@ -494,7 +449,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) struct amdgpu_vm_bo_base *bo_base; int r; - if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM) + if 
(bo->tbo.resource->mem_type == TTM_PL_SYSTEM) return; r = ttm_bo_validate(&bo->tbo, &placement, &ctx); @@ -505,7 +460,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach) for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; + struct dma_resv *resv = vm->root.bo->tbo.base.resv; if (ticket) { /* When we get an error here it means that somebody diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h index 39b5b9616fd8..3e93b9b407a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h @@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, struct amdgpu_bo *bo); -int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); extern const struct dma_buf_ops amdgpu_dmabuf_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f93883db2b46..6f30c525caac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -23,6 +23,7 @@ */ #include <drm/amdgpu_drm.h> +#include <drm/drm_aperture.h> #include <drm/drm_drv.h> #include <drm/drm_gem.h> #include <drm/drm_vblank.h> @@ -42,7 +43,7 @@ #include "amdgpu_irq.h" #include "amdgpu_dma_buf.h" #include "amdgpu_sched.h" - +#include "amdgpu_fdinfo.h" #include "amdgpu_amdkfd.h" #include "amdgpu_ras.h" @@ -94,9 +95,10 @@ * - 3.39.0 - DMABUF implicit sync does a full pipeline sync * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ * - 3.41.0 - Add video codec query + * - 3.42.0 - Add 16bpc fixed point display support */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 41 +#define KMS_DRIVER_MINOR 42 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit; @@ -171,6 +173,7 @@ int amdgpu_tmz = -1; /* auto */ uint amdgpu_freesync_vid_mode; int amdgpu_reset_method = -1; /* auto */ int amdgpu_num_kcq = -1; +int amdgpu_smartshift_bias; static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work); @@ -287,9 +290,9 @@ module_param_named(msi, amdgpu_msi, int, 0444); * for SDMA and Video. * * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) - * jobs is 10000. And there is no timeout enforced on compute jobs. + * jobs is 10000. The timeout for compute is 60000. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; " +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; " "for passthrough or sriov, 10000 for all jobs." " 0: keep default value. negative: infinity timeout), " "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " @@ -640,7 +643,8 @@ module_param_named(mes, amdgpu_mes, int, 0444); /** * DOC: noretry (int) - * Disable retry faults in the GPU memory controller. + * Disable XNACK retry in the SQ by default on GFXv9 hardware. On ASICs that + * do not support per-process XNACK this also disables retry page faults. 
* (0 = retry enabled, 1 = retry disabled, -1 auto (default)) */ MODULE_PARM_DESC(noretry, @@ -833,8 +837,23 @@ module_param_named(tmz, amdgpu_tmz, int, 0444); /** * DOC: freesync_video (uint) - * Enabled the optimization to adjust front porch timing to achieve seamless mode change experience - * when setting a freesync supported mode for which full modeset is not needed. + * Enable the optimization to adjust front porch timing to achieve seamless + * mode change experience when setting a freesync supported mode for which full + * modeset is not needed. + * + * The Display Core will add a set of modes derived from the base FreeSync + * video mode into the corresponding connector's mode list based on commonly + * used refresh rates and VRR range of the connected display, when users enable + * this feature. From the userspace perspective, they can see a seamless mode + * change experience when the change between different refresh rates under the + * same resolution. Additionally, userspace applications such as Video playback + * can read this modeset list and change the refresh rate based on the video + * frame rate. Finally, the userspace can also derive an appropriate mode for a + * particular refresh rate based on the FreeSync Mode and add it to the + * connector's mode list. + * + * Note: This is an experimental feature. + * * The default value: 0 (off). */ MODULE_PARM_DESC( @@ -1185,6 +1204,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT}, {0, 0, 0} }; @@ -1258,7 +1278,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, #endif /* Get rid of things like offb */ - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); if (ret) return ret; @@ -1310,14 +1330,16 @@ amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); -#ifdef MODULE - if (THIS_MODULE->state != MODULE_STATE_GOING) -#endif - DRM_ERROR("Hotplug removal is not supported\n"); drm_dev_unplug(dev); amdgpu_driver_unload_kms(dev); + + /* + * Flush any in flight DMA operations from device. + * Clear the Bus Master Enable bit and then wait on the PCIe Device + * StatusTransactions Pending bit. 
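The pci_remove() rework above pairs with the amdgpu_device_fini_hw() / amdgpu_device_fini_sw() split added earlier in amdgpu_device.c: hardware access is torn down as soon as the PCI device goes away, while software state survives until the last reference to the DRM node is dropped. A minimal sketch of the assumed ordering follows; example_unplug_flow() is an illustrative name, and the exact call chain through amdgpu_driver_unload_kms() and the new drm_driver.release callback (set a little further down in amdgpu_drv.c) is not shown in this excerpt:

        /* Sketch only -- ordering inferred from this series, not quoted from it. */
        static void example_unplug_flow(struct pci_dev *pdev)
        {
                struct drm_device *dev = pci_get_drvdata(pdev);

                drm_dev_unplug(dev);                      /* fence off drm_dev_enter() sections */
                amdgpu_device_fini_hw(drm_to_adev(dev));  /* stop HW, unmap MMIO/doorbells/VRAM */
                pci_disable_device(pdev);
                pci_wait_for_pending_transaction(pdev);
                /* amdgpu_device_fini_sw() runs later, from the DRM release path, once the
                 * last open file on the node goes away. */
        }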
+ */ pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); + pci_wait_for_pending_transaction(pdev); } static void @@ -1552,6 +1574,10 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) if (!adev->runpm) return -EINVAL; + /* Avoids registers access if device is physically gone */ + if (!pci_device_is_present(adev->pdev)) + adev->no_hw_access = true; + if (amdgpu_device_supports_px(drm_dev)) { drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; @@ -1597,17 +1623,15 @@ static int amdgpu_pmops_runtime_idle(struct device *dev) if (amdgpu_device_has_dc_support(adev)) { struct drm_crtc *crtc; - drm_modeset_lock_all(drm_dev); - drm_for_each_crtc(crtc, drm_dev) { - if (crtc->state->active) { + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state->active) ret = -EBUSY; + drm_modeset_unlock(&crtc->mutex); + if (ret < 0) break; - } } - drm_modeset_unlock_all(drm_dev); - } else { struct drm_connector *list_connector; struct drm_connector_list_iter iter; @@ -1688,12 +1712,15 @@ static const struct file_operations amdgpu_driver_kms_fops = { .flush = amdgpu_flush, .release = drm_release, .unlocked_ioctl = amdgpu_drm_ioctl, - .mmap = amdgpu_mmap, + .mmap = drm_gem_mmap, .poll = drm_poll, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = amdgpu_kms_compat_ioctl, #endif +#ifdef CONFIG_PROC_FS + .show_fdinfo = amdgpu_show_fdinfo +#endif }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) @@ -1747,11 +1774,12 @@ static const struct drm_driver amdgpu_kms_driver = { .dumb_create = amdgpu_mode_dumb_create, .dumb_map_offset = amdgpu_mode_dumb_mmap, .fops = &amdgpu_driver_kms_fops, + .release = &amdgpu_driver_release_kms, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import = amdgpu_gem_prime_import, - .gem_prime_mmap = amdgpu_gem_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -1768,6 +1796,18 @@ static struct pci_error_handlers amdgpu_pci_err_handler = { .resume = amdgpu_pci_resume, }; +extern const struct attribute_group amdgpu_vram_mgr_attr_group; +extern const struct attribute_group amdgpu_gtt_mgr_attr_group; +extern const struct attribute_group amdgpu_vbios_version_attr_group; + +static const struct attribute_group *amdgpu_sysfs_groups[] = { + &amdgpu_vram_mgr_attr_group, + &amdgpu_gtt_mgr_attr_group, + &amdgpu_vbios_version_attr_group, + NULL, +}; + + static struct pci_driver amdgpu_kms_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, @@ -1776,6 +1816,7 @@ static struct pci_driver amdgpu_kms_pci_driver = { .shutdown = amdgpu_pci_shutdown, .driver.pm = &amdgpu_pm_ops, .err_handler = &amdgpu_pci_err_handler, + .dev_groups = amdgpu_sysfs_groups, }; static int __init amdgpu_init(void) @@ -1797,6 +1838,7 @@ static int __init amdgpu_init(void) DRM_INFO("amdgpu kernel modesetting enabled.\n"); amdgpu_register_atpx_handler(); + amdgpu_acpi_detect(); /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */ amdgpu_amdkfd_init(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c new file mode 100644 index 000000000000..d94c5419ec25 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +/* Copyright 2021 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: David Nieto
+ * Roy Sun
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
+
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_ctx.h"
+#include "amdgpu_fdinfo.h"
+
+
+static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
+ [AMDGPU_HW_IP_GFX] = "gfx",
+ [AMDGPU_HW_IP_COMPUTE] = "compute",
+ [AMDGPU_HW_IP_DMA] = "dma",
+ [AMDGPU_HW_IP_UVD] = "dec",
+ [AMDGPU_HW_IP_VCE] = "enc",
+ [AMDGPU_HW_IP_UVD_ENC] = "enc_1",
+ [AMDGPU_HW_IP_VCN_DEC] = "dec",
+ [AMDGPU_HW_IP_VCN_ENC] = "enc",
+ [AMDGPU_HW_IP_VCN_JPEG] = "jpeg",
+};
+
+void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct amdgpu_fpriv *fpriv;
+ uint32_t bus, dev, fn, i, domain;
+ uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+ struct drm_file *file = f->private_data;
+ struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
+ int ret;
+
+ ret = amdgpu_file_to_fpriv(f, &fpriv);
+ if (ret)
+ return;
+ bus = adev->pdev->bus->number;
+ domain = pci_domain_nr(adev->pdev->bus);
+ dev = PCI_SLOT(adev->pdev->devfn);
+ fn = PCI_FUNC(adev->pdev->devfn);
+
+ ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
+ if (ret) {
+ DRM_ERROR("Fail to reserve bo\n");
+ return;
+ }
+ amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
+ amdgpu_bo_unreserve(fpriv->vm.root.bo);
+ seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
+ dev, fn, fpriv->vm.pasid);
+ seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
+ seq_printf(m, "gtt mem:\t%llu kB\n", gtt_mem/1024UL);
+ seq_printf(m, "cpu mem:\t%llu kB\n", cpu_mem/1024UL);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ uint32_t count = amdgpu_ctx_num_entities[i];
+ int idx = 0;
+ uint64_t total = 0, min = 0;
+ uint32_t perc, frac;
+
+ for (idx = 0; idx < count; idx++) {
+ total = amdgpu_ctx_mgr_fence_usage(&fpriv->ctx_mgr,
+ i, idx, &min);
+ if ((total == 0) || (min == 0))
+ continue;
+
+ perc = div64_u64(10000 * total, min);
+ frac = perc % 100;
+
+ seq_printf(m, "%s%d:\t%d.%d%%\n",
+ amdgpu_ip_name[i],
+ idx, perc/100, frac);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
new file mode 100644
index 000000000000..41a4c7056729
--- /dev/null
+++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: MIT + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: David Nieto + * Roy Sun + */ +#ifndef __AMDGPU_SMI_H__ +#define __AMDGPU_SMI_H__ + +#include <linux/idr.h> +#include <linux/kfifo.h> +#include <linux/rbtree.h> +#include <drm/gpu_scheduler.h> +#include <drm/drm_file.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <linux/sched/mm.h> + +#include "amdgpu_sync.h" +#include "amdgpu_ring.h" +#include "amdgpu_ids.h" + +uint32_t amdgpu_get_ip_count(struct amdgpu_device *adev, int id); +void amdgpu_show_fdinfo(struct seq_file *m, struct file *f); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 47ea46859618..72d9b92b1754 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -36,6 +36,7 @@ #include <linux/firmware.h> #include <linux/pm_runtime.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_trace.h" @@ -434,6 +435,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, * * @ring: ring to init the fence driver on * @num_hw_submission: number of entries on the hardware queue + * @sched_score: optional score atomic shared with other schedulers * * Init the fence driver for the requested ring (all asics). * Helper function for amdgpu_fence_driver_init(). @@ -523,10 +525,9 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev) * * Tear down the fence driver for all possible rings (all asics). 
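Looking back at the amdgpu_show_fdinfo() implementation added above: with CONFIG_PROC_FS it is wired into amdgpu_driver_kms_fops via .show_fdinfo, so the driver appends its own lines to /proc/<pid>/fdinfo/<fd> for every file opened on the device node. A hypothetical entry, with the field layout taken from the seq_printf() calls and the values invented purely for illustration, would look roughly like:

        pdev:   0000:03:00.0
        pasid:  32770
        vram mem:       262144 kB
        gtt mem:        20480 kB
        cpu mem:        8192 kB
        gfx0:   42.15%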
*/ -void amdgpu_fence_driver_fini(struct amdgpu_device *adev) +void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev) { - unsigned i, j; - int r; + int i, r; for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; @@ -535,16 +536,33 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev) continue; if (!ring->no_scheduler) drm_sched_fini(&ring->sched); - r = amdgpu_fence_wait_empty(ring); - if (r) { - /* no need to trigger GPU reset as we are unloading */ + /* You can't wait for HW to signal if it's gone */ + if (!drm_dev_is_unplugged(&adev->ddev)) + r = amdgpu_fence_wait_empty(ring); + else + r = -ENODEV; + /* no need to trigger GPU reset as we are unloading */ + if (r) amdgpu_fence_driver_force_completion(ring); - } + if (ring->fence_drv.irq_src) amdgpu_irq_put(adev, ring->fence_drv.irq_src, ring->fence_drv.irq_type); del_timer_sync(&ring->fence_drv.fallback_timer); + } +} + +void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev) +{ + unsigned int i, j; + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (!ring || !ring->fence_drv.initialized) + continue; + for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j) dma_fence_put(ring->fence_drv.fences[j]); kfree(ring->fence_drv.fences); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c index 8d1ad294cb02..2ca3c329de6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c @@ -121,6 +121,9 @@ static const struct file_operations amdgpu_fw_attestation_debugfs_ops = { static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev) { + if (adev->flags & AMD_IS_APU) + return 0; + if (adev->asic_type >= CHIP_SIENNA_CICHLID) return 1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index c5a9a4fb10d2..b36405170ff3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -60,7 +60,7 @@ */ /** - * amdgpu_dummy_page_init - init dummy page used by the driver + * amdgpu_gart_dummy_page_init - init dummy page used by the driver * * @adev: amdgpu_device pointer * @@ -86,13 +86,13 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) } /** - * amdgpu_dummy_page_fini - free dummy page used by the driver + * amdgpu_gart_dummy_page_fini - free dummy page used by the driver * * @adev: amdgpu_device pointer * * Frees the dummy page used by the driver (all asics). */ -static void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) +void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev) { if (!adev->dummy_page_addr) return; @@ -250,7 +250,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, } } mb(); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); @@ -300,7 +300,6 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, * @adev: amdgpu_device pointer * @offset: offset into the GPU's gart aperture * @pages: number of pages to bind - * @pagelist: pages to bind * @dma_addr: DMA addresses of pages * @flags: page table entry flags * @@ -309,11 +308,9 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, * Returns 0 for success, -EINVAL for failure. 
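A practical consequence of the change visible just below: amdgpu_gart_bind() no longer issues the HDP flush and GPU TLB invalidation itself; that step moves into the new amdgpu_gart_invalidate_tlb() helper, so a caller is expected to trigger it explicitly once its page-table updates are complete. A short sketch, where offset, num_pages, dma_addrs and flags are placeholder variables rather than names from the patch:

        /* Placeholder variables; the pairing below is the assumed calling pattern. */
        r = amdgpu_gart_bind(adev, offset, num_pages, dma_addrs, flags);
        if (r)
                return r;
        amdgpu_gart_invalidate_tlb(adev);   /* one combined HDP flush + TLB invalidation */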
*/ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, struct page **pagelist, dma_addr_t *dma_addr, + int pages, dma_addr_t *dma_addr, uint64_t flags) { - int r, i; - if (!adev->gart.ready) { WARN(1, "trying to bind memory to uninitialized GART !\n"); return -EINVAL; @@ -322,16 +319,26 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, if (!adev->gart.ptr) return 0; - r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags, - adev->gart.ptr); - if (r) - return r; + return amdgpu_gart_map(adev, offset, pages, dma_addr, flags, + adev->gart.ptr); +} + +/** + * amdgpu_gart_invalidate_tlb - invalidate gart TLB + * + * @adev: amdgpu device driver pointer + * + * Invalidate gart TLB which can be use as a way to flush gart changes + * + */ +void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev) +{ + int i; mb(); - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); - return 0; } /** @@ -365,15 +372,3 @@ int amdgpu_gart_init(struct amdgpu_device *adev) return 0; } - -/** - * amdgpu_gart_fini - tear down the driver info for managing the gart - * - * @adev: amdgpu_device pointer - * - * Tear down the gart driver info and free the dummy page (all asics). - */ -void amdgpu_gart_fini(struct amdgpu_device *adev) -{ - amdgpu_gart_dummy_page_fini(adev); -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index a25fe97b0196..78895413cf9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -57,14 +57,13 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev); int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); int amdgpu_gart_init(struct amdgpu_device *adev); -void amdgpu_gart_fini(struct amdgpu_device *adev); +void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev); int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages); int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags, void *dst); int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, struct page **pagelist, - dma_addr_t *dma_addr, uint64_t flags); - + int pages, dma_addr_t *dma_addr, uint64_t flags); +void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 311bcdc59eda..b3404c43a911 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -32,6 +32,7 @@ #include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_drv.h> #include <drm/drm_gem_ttm_helper.h> #include "amdgpu.h" @@ -41,6 +42,46 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs; +static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf) +{ + struct ttm_buffer_object *bo = vmf->vma->vm_private_data; + struct drm_device *ddev = bo->base.dev; + vm_fault_t ret; + int idx; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (drm_dev_enter(ddev, &idx)) { + ret = amdgpu_bo_fault_reserve_notify(bo); + if (ret) { + drm_dev_exit(idx); + goto unlock; + } + + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT, 1); + + drm_dev_exit(idx); + } else { + ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); + } + if (ret == 
VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + +unlock: + dma_resv_unlock(bo->base.resv); + return ret; +} + +static const struct vm_operations_struct amdgpu_gem_vm_ops = { + .fault = amdgpu_gem_fault, + .open = ttm_bo_vm_open, + .close = ttm_bo_vm_close, + .access = ttm_bo_vm_access +}; + static void amdgpu_gem_object_free(struct drm_gem_object *gobj) { struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); @@ -129,7 +170,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, return -EPERM; if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID && - abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) + abo->tbo.base.resv != vm->root.bo->tbo.base.resv) return -EPERM; r = amdgpu_bo_reserve(abo, false); @@ -185,7 +226,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj, if (!amdgpu_vm_ready(vm)) goto out_unlock; - fence = dma_resv_get_excl(bo->tbo.base.resv); + fence = dma_resv_excl_fence(bo->tbo.base.resv); if (fence) { amdgpu_bo_fence(bo, fence, true); fence = NULL; @@ -205,6 +246,18 @@ out_unlock: ttm_eu_backoff_reservation(&ticket, &list); } +static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) + return -EPERM; + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) + return -EPERM; + + return drm_gem_ttm_mmap(obj, vma); +} + static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .free = amdgpu_gem_object_free, .open = amdgpu_gem_object_open, @@ -212,6 +265,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = { .export = amdgpu_gem_prime_export, .vmap = drm_gem_ttm_vmap, .vunmap = drm_gem_ttm_vunmap, + .mmap = amdgpu_gem_object_mmap, + .vm_ops = &amdgpu_gem_vm_ops, }; /* @@ -265,11 +320,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, } if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { - r = amdgpu_bo_reserve(vm->root.base.bo, false); + r = amdgpu_bo_reserve(vm->root.bo, false); if (r) return r; - resv = vm->root.base.bo->tbo.base.resv; + resv = vm->root.bo->tbo.base.resv; } initial_domain = (u32)(0xffffffff & args->in.domains); @@ -298,9 +353,9 @@ retry: if (!r) { struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - abo->parent = amdgpu_bo_ref(vm->root.base.bo); + abo->parent = amdgpu_bo_ref(vm->root.bo); } - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); } if (r) return r; @@ -471,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_amdgpu_bo(gobj); - ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, - timeout); + ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout); /* ret == 0 means not signaled, * ret > 0 means signaled @@ -558,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (operation == AMDGPU_VA_OP_MAP || operation == AMDGPU_VA_OP_REPLACE) { - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) goto error; } @@ -766,7 +820,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, void __user *out = u64_to_user_ptr(args->value); info.bo_size = robj->tbo.base.size; - info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; + info.alignment = robj->tbo.page_alignment << PAGE_SHIFT; info.domains = robj->preferred_domains; info.domain_flags = robj->flags; amdgpu_bo_unreserve(robj); @@ -787,7 +841,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, 
void *data, } for (base = robj->vm_bo; base; base = base->next) if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev), - amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) { + amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) { r = -EINVAL; amdgpu_bo_unreserve(robj); goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 95d4f43a03df..a0be0772c8b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -607,7 +607,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) struct ras_ih_if ih_info = { .cb = amdgpu_gfx_process_ras_data_cb, }; - struct ras_query_if info = { 0 }; if (!adev->gfx.ras_if) { adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); @@ -625,12 +624,8 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) goto free; if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { - if (adev->gmc.xgmi.connected_to_cpu) { - info.head = *adev->gfx.ras_if; - amdgpu_ras_query_error_status(adev, &info); - } else { + if (!amdgpu_persistent_edc_harvesting_supported(adev)) amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); - } r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h index 66ebc2e3b2ad..beabab515836 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h @@ -34,6 +34,7 @@ struct amdgpu_gfxhub_funcs { void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value); void (*init)(struct amdgpu_device *adev); int (*get_xgmi_info)(struct amdgpu_device *adev); + void (*utcl2_harvest)(struct amdgpu_device *adev); }; struct amdgpu_gfxhub { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index c39ed9eb0987..0174f7817ce2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -31,6 +31,8 @@ #include "amdgpu_ras.h" #include "amdgpu_xgmi.h" +#include <drm/drm_drv.h> + /** * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0 * @@ -99,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - switch (bo->tbo.mem.mem_type) { + switch (bo->tbo.resource->mem_type) { case TTM_PL_TT: *addr = bo->tbo.ttm->dma_address[0]; break; @@ -110,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, *addr = 0; break; } - *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem); + *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource); amdgpu_gmc_get_vm_pde(adev, level, addr, flags); } @@ -151,6 +153,10 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, { void __iomem *ptr = (void *)cpu_pt_addr; uint64_t value; + int idx; + + if (!drm_dev_enter(&adev->ddev, &idx)) + return 0; /* * The following is for PTE only. GART does not have PDEs. 
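The drm_dev_enter()/drm_dev_exit() bracket added in this hunk (and in amdgpu_gem_fault() further up) is the recurring hotplug guard in this series: work that would touch the possibly already unmapped device is skipped once drm_dev_unplug() has been called. The general shape, restated as a sketch with the device access itself elided:

        int idx;

        if (!drm_dev_enter(&adev->ddev, &idx))
                return 0;       /* device is gone; nothing left to write, report success */

        /* ... MMIO or CPU-mapped VRAM access goes here ... */

        drm_dev_exit(idx);
        return 0;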
@@ -158,6 +164,9 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, value = addr & 0x0000FFFFFFFFF000ULL; value |= flags; writeq(value, ptr + (gpu_page_idx * 8)); + + drm_dev_exit(idx); + return 0; } @@ -333,6 +342,17 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) } /** + * amdgpu_gmc_fault_key - get hask key from vm fault address and pasid + * + * @addr: 48 bit physical address, page aligned (36 significant bits) + * @pasid: 16 bit process address space identifier + */ +static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid) +{ + return addr << 4 | pasid; +} + +/** * amdgpu_gmc_filter_faults - filter VM faults * * @adev: amdgpu device structure @@ -348,8 +368,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp) { struct amdgpu_gmc *gmc = &adev->gmc; - - uint64_t stamp, key = addr << 4 | pasid; + uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid); struct amdgpu_gmc_fault *fault; uint32_t hash; @@ -365,7 +384,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, while (fault->timestamp >= stamp) { uint64_t tmp; - if (fault->key == key) + if (atomic64_read(&fault->key) == key) return true; tmp = fault->timestamp; @@ -378,7 +397,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, /* Add the fault to the ring */ fault = &gmc->fault_ring[gmc->last_fault]; - fault->key = key; + atomic64_set(&fault->key, key); fault->timestamp = timestamp; /* And update the hash */ @@ -387,6 +406,36 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, return false; } +/** + * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter + * + * @adev: amdgpu device structure + * @addr: address of the VM fault + * @pasid: PASID of the process causing the fault + * + * Remove the address from fault filter, then future vm fault on this address + * will pass to retry fault handler to recover. 
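In other words, amdgpu_gmc_filter_faults() de-duplicates retry faults on an address while one is being worked on, and the new remove helper re-arms delivery once that fault has actually been resolved. The intended pairing, sketched with a hypothetical handler name (no such consumer appears in this excerpt):

        /* handle_retry_fault() is a placeholder for whatever resolves the fault,
         * e.g. validating or migrating the backing pages. */
        if (!handle_retry_fault(adev, addr, pasid))
                amdgpu_gmc_filter_faults_remove(adev, addr, pasid);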
+ */ +void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, + uint16_t pasid) +{ + struct amdgpu_gmc *gmc = &adev->gmc; + uint64_t key = amdgpu_gmc_fault_key(addr, pasid); + struct amdgpu_gmc_fault *fault; + uint32_t hash; + uint64_t tmp; + + hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER); + fault = &gmc->fault_ring[gmc->fault_hash[hash].idx]; + do { + if (atomic64_cmpxchg(&fault->key, key, 0) == key) + break; + + tmp = fault->timestamp; + fault = &gmc->fault_ring[fault->next]; + } while (fault->timestamp < tmp); +} + int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) { int r; @@ -415,6 +464,13 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) return r; } + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->ras_late_init) { + r = adev->hdp.ras_funcs->ras_late_init(adev); + if (r) + return r; + } + return 0; } @@ -426,11 +482,15 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) if (adev->mmhub.ras_funcs && adev->mmhub.ras_funcs->ras_fini) - amdgpu_mmhub_ras_fini(adev); + adev->mmhub.ras_funcs->ras_fini(adev); if (adev->gmc.xgmi.ras_funcs && adev->gmc.xgmi.ras_funcs->ras_fini) adev->gmc.xgmi.ras_funcs->ras_fini(adev); + + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->ras_fini) + adev->hdp.ras_funcs->ras_fini(adev); } /* @@ -477,7 +537,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) } /** - * amdgpu_tmz_set -- check and set if a device supports TMZ + * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ * @adev: amdgpu_device pointer * * Check and set if an the device @adev supports Trusted Memory @@ -523,7 +583,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) } /** - * amdgpu_noretry_set -- set per asic noretry defaults + * amdgpu_gmc_noretry_set -- set per asic noretry defaults * @adev: amdgpu_device pointer * * Set a per asic default for the no-retry parameter. @@ -578,13 +638,18 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, for (i = 0; i < 16; i++) { reg = hub->vm_context0_cntl + hub->ctx_distance * i; - tmp = RREG32(reg); + tmp = (hub_type == AMDGPU_GFXHUB_0) ? + RREG32_SOC15_IP(GC, reg) : + RREG32_SOC15_IP(MMHUB, reg); + if (enable) tmp |= hub->vm_cntx_cntl_vm_fault; else tmp &= ~hub->vm_cntx_cntl_vm_fault; - WREG32(reg, tmp); + (hub_type == AMDGPU_GFXHUB_0) ? 
+ WREG32_SOC15_IP(GC, reg, tmp) : + WREG32_SOC15_IP(MMHUB, reg, tmp); } } @@ -720,3 +785,22 @@ uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo { return amdgpu_bo_gpu_offset(bo) - adev->gmc.vram_start + adev->gmc.aper_base; } + +void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev) +{ + /* Some ASICs need to reserve a region of video memory to avoid access + * from driver */ + adev->mman.stolen_reserved_offset = 0; + adev->mman.stolen_reserved_size = 0; + + switch (adev->asic_type) { + case CHIP_YELLOW_CARP: + if (amdgpu_discovery == 0) { + adev->mman.stolen_reserved_offset = 0x1ffb0000; + adev->mman.stolen_reserved_size = 64 * PAGE_SIZE; + } + break; + default: + break; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 9d11c02a3938..e55201134a01 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -66,9 +66,9 @@ struct firmware; * GMC page fault information */ struct amdgpu_gmc_fault { - uint64_t timestamp; + uint64_t timestamp:48; uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER; - uint64_t key:52; + atomic64_t key; }; /* @@ -318,6 +318,8 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp); +void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, + uint16_t pasid); int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); @@ -330,6 +332,7 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type, bool enable); void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev); +void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev); void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 540c01052b21..ec96e0b26b11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -22,17 +22,26 @@ * Authors: Christian König */ +#include <drm/ttm/ttm_range_manager.h> + #include "amdgpu.h" -static inline struct amdgpu_gtt_mgr *to_gtt_mgr(struct ttm_resource_manager *man) +struct amdgpu_gtt_node { + struct ttm_buffer_object *tbo; + struct ttm_range_mgr_node base; +}; + +static inline struct amdgpu_gtt_mgr * +to_gtt_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_gtt_mgr, manager); } -struct amdgpu_gtt_node { - struct drm_mm_node node; - struct ttm_buffer_object *tbo; -}; +static inline struct amdgpu_gtt_node * +to_amdgpu_gtt_node(struct ttm_resource *res) +{ + return container_of(res, struct amdgpu_gtt_node, base.base); +} /** * DOC: mem_info_gtt_total @@ -43,12 +52,14 @@ struct amdgpu_gtt_node { * the GTT block, in bytes */ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man; + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return sysfs_emit(buf, "%llu\n", man->size 
* PAGE_SIZE); } @@ -61,12 +72,14 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev, * size of the GTT block, in bytes */ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + struct ttm_resource_manager *man; + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man)); } @@ -75,90 +88,28 @@ static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO, static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO, amdgpu_mem_info_gtt_used_show, NULL); -static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func; -/** - * amdgpu_gtt_mgr_init - init GTT manager and DRM MM - * - * @adev: amdgpu_device pointer - * @gtt_size: maximum size of GTT - * - * Allocate and initialize the GTT manager. - */ -int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) -{ - struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr; - struct ttm_resource_manager *man = &mgr->manager; - uint64_t start, size; - int ret; - - man->use_tt = true; - man->func = &amdgpu_gtt_mgr_func; - - ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT); - - start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; - size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; - drm_mm_init(&mgr->mm, start, size); - spin_lock_init(&mgr->lock); - atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT); - - ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_gtt_total\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used); - if (ret) { - DRM_ERROR("Failed to create device file mem_info_gtt_used\n"); - return ret; - } - - ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); - ttm_resource_manager_set_used(man, true); - return 0; -} - -/** - * amdgpu_gtt_mgr_fini - free and destroy GTT manager - * - * @adev: amdgpu_device pointer - * - * Destroy and free the GTT manager, returns -EBUSY if ranges are still - * allocated inside it. - */ -void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) -{ - struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr; - struct ttm_resource_manager *man = &mgr->manager; - int ret; - - ttm_resource_manager_set_used(man, false); - - ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); - if (ret) - return; - - spin_lock(&mgr->lock); - drm_mm_takedown(&mgr->mm); - spin_unlock(&mgr->lock); - - device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total); - device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used); +static struct attribute *amdgpu_gtt_mgr_attributes[] = { + &dev_attr_mem_info_gtt_total.attr, + &dev_attr_mem_info_gtt_used.attr, + NULL +}; - ttm_resource_manager_cleanup(man); - ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL); -} +const struct attribute_group amdgpu_gtt_mgr_attr_group = { + .attrs = amdgpu_gtt_mgr_attributes +}; /** * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space * - * @mem: the mem object to check + * @res: the mem object to check * * Check if a mem object has already address space allocated. 
*/ -bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem) +bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res) { - return mem->mm_node != NULL; + struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res); + + return drm_mm_node_allocated(&node->base.mm_nodes[0]); } /** @@ -174,54 +125,57 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem) static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_resource *mem) + struct ttm_resource **res) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); + uint32_t num_pages = PFN_UP(tbo->base.size); struct amdgpu_gtt_node *node; int r; spin_lock(&mgr->lock); - if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) && - atomic64_read(&mgr->available) < mem->num_pages) { + if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT && + atomic64_read(&mgr->available) < num_pages) { spin_unlock(&mgr->lock); return -ENOSPC; } - atomic64_sub(mem->num_pages, &mgr->available); + atomic64_sub(num_pages, &mgr->available); spin_unlock(&mgr->lock); - if (!place->lpfn) { - mem->mm_node = NULL; - mem->start = AMDGPU_BO_INVALID_OFFSET; - return 0; - } - - node = kzalloc(sizeof(*node), GFP_KERNEL); + node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL); if (!node) { r = -ENOMEM; goto err_out; } node->tbo = tbo; + ttm_resource_init(tbo, place, &node->base.base); - spin_lock(&mgr->lock); - r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages, - mem->page_alignment, 0, place->fpfn, - place->lpfn, DRM_MM_INSERT_BEST); - spin_unlock(&mgr->lock); - - if (unlikely(r)) - goto err_free; - - mem->mm_node = node; - mem->start = node->node.start; + if (place->lpfn) { + spin_lock(&mgr->lock); + r = drm_mm_insert_node_in_range(&mgr->mm, + &node->base.mm_nodes[0], + num_pages, tbo->page_alignment, + 0, place->fpfn, place->lpfn, + DRM_MM_INSERT_BEST); + spin_unlock(&mgr->lock); + if (unlikely(r)) + goto err_free; + + node->base.base.start = node->base.mm_nodes[0].start; + } else { + node->base.mm_nodes[0].start = 0; + node->base.mm_nodes[0].size = node->base.base.num_pages; + node->base.base.start = AMDGPU_BO_INVALID_OFFSET; + } + *res = &node->base.base; return 0; err_free: kfree(node); err_out: - atomic64_add(mem->num_pages, &mgr->available); + atomic64_add(num_pages, &mgr->available); return r; } @@ -235,19 +189,18 @@ err_out: * Free the allocated GTT again. */ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, - struct ttm_resource *mem) + struct ttm_resource *res) { + struct amdgpu_gtt_node *node = to_amdgpu_gtt_node(res); struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - struct amdgpu_gtt_node *node = mem->mm_node; - if (node) { - spin_lock(&mgr->lock); - drm_mm_remove_node(&node->node); - spin_unlock(&mgr->lock); - kfree(node); - } + spin_lock(&mgr->lock); + if (drm_mm_node_allocated(&node->base.mm_nodes[0])) + drm_mm_remove_node(&node->base.mm_nodes[0]); + spin_unlock(&mgr->lock); + atomic64_add(res->num_pages, &mgr->available); - atomic64_add(mem->num_pages, &mgr->available); + kfree(node); } /** @@ -265,22 +218,33 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) return (result > 0 ? result : 0) * PAGE_SIZE; } +/** + * amdgpu_gtt_mgr_recover - re-init gart + * + * @man: TTM memory type manager + * + * Re-init the gart for each known BO in the GTT. 
+ */ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); + struct amdgpu_device *adev; struct amdgpu_gtt_node *node; struct drm_mm_node *mm_node; int r = 0; + adev = container_of(mgr, typeof(*adev), mman.gtt_mgr); spin_lock(&mgr->lock); drm_mm_for_each_node(mm_node, &mgr->mm) { - node = container_of(mm_node, struct amdgpu_gtt_node, node); + node = container_of(mm_node, typeof(*node), base.mm_nodes[0]); r = amdgpu_ttm_recover_gart(node->tbo); if (r) break; } spin_unlock(&mgr->lock); + amdgpu_gart_invalidate_tlb(adev); + return r; } @@ -311,3 +275,61 @@ static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { .free = amdgpu_gtt_mgr_del, .debug = amdgpu_gtt_mgr_debug }; + +/** + * amdgpu_gtt_mgr_init - init GTT manager and DRM MM + * + * @adev: amdgpu_device pointer + * @gtt_size: maximum size of GTT + * + * Allocate and initialize the GTT manager. + */ +int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) +{ + struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + uint64_t start, size; + + man->use_tt = true; + man->func = &amdgpu_gtt_mgr_func; + + ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT); + + start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; + size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; + drm_mm_init(&mgr->mm, start, size); + spin_lock_init(&mgr->lock); + atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT); + + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); + ttm_resource_manager_set_used(man, true); + return 0; +} + +/** + * amdgpu_gtt_mgr_fini - free and destroy GTT manager + * + * @adev: amdgpu_device pointer + * + * Destroy and free the GTT manager, returns -EBUSY if ranges are still + * allocated inside it. + */ +void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) +{ + struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + + ttm_resource_manager_set_used(man, false); + + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + if (ret) + return; + + spin_lock(&mgr->lock); + drm_mm_takedown(&mgr->mm); + spin_unlock(&mgr->lock); + + ttm_resource_manager_cleanup(man); + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c new file mode 100644 index 000000000000..1d50d534d77c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -0,0 +1,69 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "hdp_err_count", + }; + + if (!adev->hdp.ras_if) { + adev->hdp.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->hdp.ras_if) + return -ENOMEM; + adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP; + adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->hdp.ras_if->sub_block_index = 0; + strcpy(adev->hdp.ras_if->name, "hdp"); + } + ih_info.head = fs_info.head = *adev->hdp.ras_if; + r = amdgpu_ras_late_init(adev, adev->hdp.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->hdp.ras_if->block)) { + kfree(adev->hdp.ras_if); + adev->hdp.ras_if = NULL; + } + + return r; +} + +void amdgpu_hdp_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP) && + adev->hdp.ras_if) { + struct ras_common_if *ras_if = adev->hdp.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index 43caf9f8cc11..7ec99d591584 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -23,18 +23,29 @@ #ifndef __AMDGPU_HDP_H__ #define __AMDGPU_HDP_H__ +struct amdgpu_hdp_ras_funcs { + int (*ras_late_init)(struct amdgpu_device *adev); + void (*ras_fini)(struct amdgpu_device *adev); + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); +}; + struct amdgpu_hdp_funcs { void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); void (*invalidate_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); - void (*reset_ras_error_count)(struct amdgpu_device *adev); void (*update_clock_gating)(struct amdgpu_device *adev, bool enable); void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags); void (*init_registers)(struct amdgpu_device *adev); }; struct amdgpu_hdp { + struct ras_common_if *ras_if; const struct amdgpu_hdp_funcs *funcs; + const struct amdgpu_hdp_ras_funcs *ras_funcs; }; +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev); +void amdgpu_hdp_ras_fini(struct amdgpu_device *adev); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index a2fe2dac32c1..ec65ab0ddf89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -130,7 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; struct dma_fence *tmp = NULL; - bool skip_preamble, need_ctx_switch; + bool need_ctx_switch; unsigned patch_offset = ~0; struct amdgpu_vm *vm; uint64_t fence_ctx; @@ -214,20 +214,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (job && ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); -#ifdef CONFIG_X86_64 - if (!(adev->flags & AMD_IS_APU)) -#endif - 
{ - if (ring->funcs->emit_hdp_flush) - amdgpu_ring_emit_hdp_flush(ring); - else - amdgpu_asic_flush_hdp(adev, ring); - } + amdgpu_device_flush_hdp(adev, ring); if (need_ctx_switch) status |= AMDGPU_HAVE_CTX_SWITCH; - skip_preamble = ring->current_ctx == fence_ctx; if (job && ring->funcs->emit_cntxcntl) { status |= job->preamble_status; status |= job->preemption_status; @@ -245,14 +236,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, for (i = 0; i < num_ibs; ++i) { ib = &ibs[i]; - /* drop preamble IBs if we don't have a context switch */ - if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && - skip_preamble && - !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) && - !amdgpu_mcbp && - !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */ - continue; - if (job && ring->funcs->emit_frame_cntl) { if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) { amdgpu_ring_emit_frame_cntl(ring, false, secure); @@ -268,10 +251,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, if (job && ring->funcs->emit_frame_cntl) amdgpu_ring_emit_frame_cntl(ring, false, secure); -#ifdef CONFIG_X86_64 - if (!(adev->flags & AMD_IS_APU)) -#endif - amdgpu_asic_invalidate_hdp(adev, ring); + amdgpu_device_invalidate_hdp(adev, ring); if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; @@ -328,7 +308,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev) for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) { if (i == AMDGPU_IB_POOL_DIRECT) - size = PAGE_SIZE * 2; + size = PAGE_SIZE * 6; else size = AMDGPU_IB_POOL_SIZE; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index b4971e90b98c..b7fb72bff2c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv, unsigned count; int r; - r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences); + r = dma_resv_get_fences(resv, NULL, &count, &fences); if (r) goto fallback; @@ -156,8 +156,7 @@ fallback: /* Not enough memory for the delayed delete, as last resort * block for all the fences to complete. 
*/ - dma_resv_wait_timeout_rcu(resv, true, false, - MAX_SCHEDULE_TIMEOUT); + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); amdgpu_pasid_free(pasid); } @@ -183,7 +182,7 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, } /** - * amdgpu_vm_grab_idle - grab idle VMID + * amdgpu_vmid_grab_idle - grab idle VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to @@ -256,7 +255,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm, } /** - * amdgpu_vm_grab_reserved - try to assign reserved VMID + * amdgpu_vmid_grab_reserved - try to assign reserved VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to @@ -325,7 +324,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, } /** - * amdgpu_vm_grab_used - try to reuse a VMID + * amdgpu_vmid_grab_used - try to reuse a VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to @@ -397,7 +396,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, } /** - * amdgpu_vm_grab_id - allocate the next free VMID + * amdgpu_vmid_grab - allocate the next free VMID * * @vm: vm to allocate id for * @ring: ring we want to submit job to diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index faaa6aa2faaf..f3d62e196901 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -115,9 +115,11 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih, */ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { + + if (!ih->ring) + return; + if (ih->use_bus_addr) { - if (!ih->ring) - return; /* add 8 bytes for the rptr/wptr shadows and * add them to the end of the ring allocation. @@ -175,7 +177,9 @@ static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev, cur_rptr += ih->ptr_mask + 1; *prev_rptr = cur_rptr; - return cur_rptr >= checkpoint_wptr; + /* check ring is empty to workaround missing wptr overflow flag */ + return cur_rptr >= checkpoint_wptr || + (cur_rptr & ih->ptr_mask) == amdgpu_ih_get_wptr(adev, ih); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 90f50561b43a..32ce0e679dc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -49,6 +49,7 @@ #include <drm/drm_irq.h> #include <drm/drm_vblank.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_ih.h" #include "atom.h" @@ -348,6 +349,25 @@ int amdgpu_irq_init(struct amdgpu_device *adev) return 0; } + +void amdgpu_irq_fini_hw(struct amdgpu_device *adev) +{ + if (adev->irq.installed) { + drm_irq_uninstall(&adev->ddev); + adev->irq.installed = false; + if (adev->irq.msi_enabled) + pci_free_irq_vectors(adev->pdev); + + if (!amdgpu_device_has_dc_support(adev)) + flush_work(&adev->hotplug_work); + } + + amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_ih_ring_fini(adev, &adev->irq.ih1); + amdgpu_ih_ring_fini(adev, &adev->irq.ih2); +} + /** * amdgpu_irq_fini - shut down interrupt handling * @@ -357,19 +377,10 @@ int amdgpu_irq_init(struct amdgpu_device *adev) * functionality, shuts down vblank, hotplug and reset interrupt handling, * turns off interrupts from all sources (all ASICs). 
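+ *
+ * Note that the teardown is split in two now: amdgpu_irq_fini_hw() tears down
+ * the hardware side (uninstalls the IRQ handler, frees MSI vectors and the IH
+ * rings) and can run early, while this function only frees the software state
+ * of the IRQ sources and may run much later, e.g. when a hot-unplugged device
+ * is finally released.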
*/ -void amdgpu_irq_fini(struct amdgpu_device *adev) +void amdgpu_irq_fini_sw(struct amdgpu_device *adev) { unsigned i, j; - if (adev->irq.installed) { - drm_irq_uninstall(adev_to_drm(adev)); - adev->irq.installed = false; - if (adev->irq.msi_enabled) - pci_free_irq_vectors(adev->pdev); - if (!amdgpu_device_has_dc_support(adev)) - flush_work(&adev->hotplug_work); - } - for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { if (!adev->irq.client[i].sources) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index cf6116648322..78ad4784cc74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -103,7 +103,8 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev); irqreturn_t amdgpu_irq_handler(int irq, void *arg); int amdgpu_irq_init(struct amdgpu_device *adev); -void amdgpu_irq_fini(struct amdgpu_device *adev); +void amdgpu_irq_fini_sw(struct amdgpu_device *adev); +void amdgpu_irq_fini_hw(struct amdgpu_device *adev); int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned client_id, unsigned src_id, struct amdgpu_irq_src *source); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 759b34799221..d33e6d97cc89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -25,6 +25,8 @@ #include <linux/wait.h> #include <linux/sched.h> +#include <drm/drm_drv.h> + #include "amdgpu.h" #include "amdgpu_trace.h" @@ -34,6 +36,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) struct amdgpu_job *job = to_amdgpu_job(s_job); struct amdgpu_task_info ti; struct amdgpu_device *adev = ring->adev; + int idx; + + if (!drm_dev_enter(&adev->ddev, &idx)) { + DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s", + __func__, s_job->sched->name); + + /* Effectively the job is aborted as the device is gone */ + return DRM_GPU_SCHED_STAT_ENODEV; + } memset(&ti, 0, sizeof(struct amdgpu_task_info)); @@ -41,7 +52,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { DRM_ERROR("ring %s timeout, but soft recovered\n", s_job->sched->name); - return DRM_GPU_SCHED_STAT_NOMINAL; + goto exit; } amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); @@ -53,13 +64,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) if (amdgpu_device_should_recover_gpu(ring->adev)) { amdgpu_device_gpu_recover(ring->adev, job); - return DRM_GPU_SCHED_STAT_NOMINAL; } else { drm_sched_suspend_timeout(&ring->sched); if (amdgpu_sriov_vf(adev)) adev->virt.tdr_debug = true; - return DRM_GPU_SCHED_STAT_NOMINAL; } + +exit: + drm_dev_exit(idx); + return DRM_GPU_SCHED_STAT_NOMINAL; } int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 39ee88d29cca..96ef3f1051d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -28,6 +28,7 @@ #include "amdgpu.h" #include <drm/amdgpu_drm.h> +#include <drm/drm_drv.h> #include "amdgpu_uvd.h" #include "amdgpu_vce.h" #include "atom.h" @@ -91,8 +92,11 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) pm_runtime_forbid(dev->dev); } + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD)) + DRM_WARN("smart shift update failed\n"); + amdgpu_acpi_fini(adev); - amdgpu_device_fini(adev); + 
amdgpu_device_fini_hw(adev); } void amdgpu_register_gpu_instance(struct amdgpu_device *adev) @@ -120,6 +124,22 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) mutex_unlock(&mgpu_info.mutex); } +static void amdgpu_get_audio_func(struct amdgpu_device *adev) +{ + struct pci_dev *p = NULL; + + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (p) { + pm_runtime_get_sync(&p->dev); + + pm_runtime_mark_last_busy(&p->dev); + pm_runtime_put_autosuspend(&p->dev); + + pci_dev_put(p); + } +} + /** * amdgpu_driver_load_kms - Main load function for KMS. * @@ -209,11 +229,40 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) DPM_FLAG_MAY_SKIP_RESUME); pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); + pm_runtime_allow(dev->dev); + pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); + + /* + * For runpm implemented via BACO, PMFW will handle the + * timing for BACO in and out: + * - put ASIC into BACO state only when both video and + * audio functions are in D3 state. + * - pull ASIC out of BACO state when either video or + * audio function is in D0 state. + * Also, at startup, PMFW assumes both functions are in + * D0 state. + * + * So if snd driver was loaded prior to amdgpu driver + * and audio function was put into D3 state, there will + * be no PMFW-aware D-state transition(D0->D3) on runpm + * suspend. Thus the BACO will be not correctly kicked in. + * + * Via amdgpu_get_audio_func(), the audio dev is put + * into D0 state. Then there will be a PMFW-aware D-state + * transition(D0->D3) on runpm suspend. + */ + if (amdgpu_device_supports_baco(dev) && + !(adev->flags & AMD_IS_APU) && + (adev->asic_type >= CHIP_NAVI10)) + amdgpu_get_audio_func(adev); } + if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) + DRM_WARN("smart shift update failed\n"); + out: if (r) { /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ @@ -861,6 +910,21 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) min((size_t)size, (size_t)(bios_size - bios_offset))) ? -EFAULT : 0; } + case AMDGPU_INFO_VBIOS_INFO: { + struct drm_amdgpu_info_vbios vbios_info = {}; + struct atom_context *atom_context; + + atom_context = adev->mode_info.atom_context; + memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name)); + memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn)); + vbios_info.version = atom_context->version; + memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str, + sizeof(atom_context->vbios_ver_str)); + memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date)); + + return copy_to_user(out, &vbios_info, + min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0; + } default: DRM_DEBUG_KMS("Invalid request %d\n", info->vbios_info.type); @@ -986,7 +1050,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (!ras) return -EINVAL; - ras_mask = (uint64_t)ras->supported << 32 | ras->features; + ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features; return copy_to_user(out, &ras_mask, min_t(u64, size, sizeof(ras_mask))) ? 
@@ -1114,7 +1178,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) dev_warn(adev->dev, "No more PASIDs available!"); pasid = 0; } - r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid); + + r = amdgpu_vm_init(adev, &fpriv->vm, pasid); if (r) goto error_pasid; @@ -1197,7 +1262,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, } pasid = fpriv->vm.pasid; - pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); + pd = amdgpu_bo_ref(fpriv->vm.root.bo); amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); amdgpu_vm_fini(adev, &fpriv->vm); @@ -1219,6 +1284,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, pm_runtime_put_autosuspend(dev->dev); } + +void amdgpu_driver_release_kms(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + + amdgpu_device_fini_sw(adev); + pci_set_drvdata(adev->pdev, NULL); +} + /* * VBlank related functions. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 11aa29933c1f..b27fcbccce2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -28,6 +28,7 @@ struct amdgpu_mmhub_ras_funcs { void *ras_error_status); void (*query_ras_error_status)(struct amdgpu_device *adev); void (*reset_ras_error_count)(struct amdgpu_device *adev); + void (*reset_ras_error_status)(struct amdgpu_device *adev); }; struct amdgpu_mmhub_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 828b5167ff12..d6c54c7f7679 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, mmu_interval_set_seq(mni, cur_seq); - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + MAX_SCHEDULE_TIMEOUT); mutex_unlock(&adev->notifier_lock); if (r <= 0) DRM_ERROR("(%ld) failed to wait for user bo\n", r); @@ -155,3 +155,89 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo) mmu_interval_notifier_remove(&bo->notifier); bo->notifier.mm = NULL; } + +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, + struct mm_struct *mm, struct page **pages, + uint64_t start, uint64_t npages, + struct hmm_range **phmm_range, bool readonly, + bool mmap_locked) +{ + struct hmm_range *hmm_range; + unsigned long timeout; + unsigned long i; + unsigned long *pfns; + int r = 0; + + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); + if (unlikely(!hmm_range)) + return -ENOMEM; + + pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); + if (unlikely(!pfns)) { + r = -ENOMEM; + goto out_free_range; + } + + hmm_range->notifier = notifier; + hmm_range->default_flags = HMM_PFN_REQ_FAULT; + if (!readonly) + hmm_range->default_flags |= HMM_PFN_REQ_WRITE; + hmm_range->hmm_pfns = pfns; + hmm_range->start = start; + hmm_range->end = start + npages * PAGE_SIZE; + + /* Assuming 512MB takes maxmium 1 second to fault page address */ + timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT; + timeout = jiffies + msecs_to_jiffies(timeout); + +retry: + hmm_range->notifier_seq = mmu_interval_read_begin(notifier); + + if (likely(!mmap_locked)) + mmap_read_lock(mm); + + r = hmm_range_fault(hmm_range); + + if (likely(!mmap_locked)) + mmap_read_unlock(mm); + if (unlikely(r)) { + /* + * FIXME: This timeout should encompass the retry from + * mmu_interval_read_retry() as well. 
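+	 *
+	 * hmm_range_fault() returns -EBUSY when the notifier sequence changed
+	 * while the range was being faulted in; in that case the walk is simply
+	 * restarted from mmu_interval_read_begin() until the timeout computed
+	 * above expires.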
+ */ + if (r == -EBUSY && !time_after(jiffies, timeout)) + goto retry; + goto out_free_pfns; + } + + /* + * Due to default_flags, all pages are HMM_PFN_VALID or + * hmm_range_fault() fails. FIXME: The pages cannot be touched outside + * the notifier_lock, and mmu_interval_read_retry() must be done first. + */ + for (i = 0; pages && i < npages; i++) + pages[i] = hmm_pfn_to_page(pfns[i]); + + *phmm_range = hmm_range; + + return 0; + +out_free_pfns: + kvfree(pfns); +out_free_range: + kfree(hmm_range); + + return r; +} + +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) +{ + int r; + + r = mmu_interval_read_retry(hmm_range->notifier, + hmm_range->notifier_seq); + kvfree(hmm_range->hmm_pfns); + kfree(hmm_range); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h index a292238f75eb..7f7d37a457c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h @@ -30,6 +30,13 @@ #include <linux/workqueue.h> #include <linux/interval_tree.h> +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, + struct mm_struct *mm, struct page **pages, + uint64_t start, uint64_t npages, + struct hmm_range **phmm_range, bool readonly, + bool mmap_locked); +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); + #if defined(CONFIG_HMM_MIRROR) int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_mn_unregister(struct amdgpu_bo *bo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index cb0b581bbce7..89fb372ed49c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -344,7 +344,7 @@ struct amdgpu_mode_info { /* pointer to fbdev info structure */ struct amdgpu_fbdev *rfbdev; /* firmware flags */ - u16 firmware_flags; + u32 firmware_flags; /* pointer to backlight encoder */ struct amdgpu_encoder *bl_encoder; u8 bl_level; /* saved backlight level */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index f9434bc2f9b2..795fa7445abe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -52,55 +52,44 @@ * */ -/** - * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting - * - * @bo: &amdgpu_bo buffer object - * - * This function is called when a BO stops being pinned, and updates the - * &amdgpu_device pin_size values accordingly. 
- */ -static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo) +static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) { - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { - atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size); - atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo), - &adev->visible_pin_size); - } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { - atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size); - } + amdgpu_bo_kunmap(bo); + + if (bo->tbo.base.import_attach) + drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); + drm_gem_object_release(&bo->tbo.base); + amdgpu_bo_unref(&bo->parent); + kvfree(bo); } -static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) +static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo) { - struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); struct amdgpu_bo_user *ubo; - if (bo->tbo.pin_count > 0) - amdgpu_bo_subtract_pin_size(bo); + ubo = to_amdgpu_bo_user(bo); + kfree(ubo->metadata); + amdgpu_bo_destroy(tbo); +} - amdgpu_bo_kunmap(bo); +static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo); + struct amdgpu_bo_vm *vmbo; - if (bo->tbo.base.import_attach) - drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); - drm_gem_object_release(&bo->tbo.base); + vmbo = to_amdgpu_bo_vm(bo); /* in case amdgpu_device_recover_vram got NULL of bo->parent */ - if (!list_empty(&bo->shadow_list)) { + if (!list_empty(&vmbo->shadow_list)) { mutex_lock(&adev->shadow_list_lock); - list_del_init(&bo->shadow_list); + list_del_init(&vmbo->shadow_list); mutex_unlock(&adev->shadow_list_lock); } - amdgpu_bo_unref(&bo->parent); - - if (bo->tbo.type == ttm_bo_type_device) { - ubo = to_amdgpu_bo_user(bo); - kfree(ubo->metadata); - } - kvfree(bo); + amdgpu_bo_destroy(tbo); } /** @@ -115,8 +104,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) */ bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo) { - if (bo->destroy == &amdgpu_bo_destroy) + if (bo->destroy == &amdgpu_bo_destroy || + bo->destroy == &amdgpu_bo_user_destroy || + bo->destroy == &amdgpu_bo_vm_destroy) return true; + return false; } @@ -157,7 +149,9 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_GTT) { places[c].fpfn = 0; places[c].lpfn = 0; - places[c].mem_type = TTM_PL_TT; + places[c].mem_type = + abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ? 
+ AMDGPU_PL_PREEMPT : TTM_PL_TT; places[c].flags = 0; c++; } @@ -386,14 +380,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, if (cpu_addr) amdgpu_bo_kunmap(*bo_ptr); - ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource); for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; } r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, - &(*bo_ptr)->tbo.mem, &ctx); + &(*bo_ptr)->tbo.resource, &ctx); if (r) goto error; @@ -515,7 +509,18 @@ bool amdgpu_bo_support_uswc(u64 bo_flags) #endif } -static int amdgpu_bo_do_create(struct amdgpu_device *adev, +/** + * amdgpu_bo_create - create an &amdgpu_bo buffer object + * @adev: amdgpu device object + * @bp: parameters to be used for the buffer object + * @bo_ptr: pointer to the buffer object pointer + * + * Creates an &amdgpu_bo buffer object. + * + * Returns: + * 0 for success or a negative error code on failure. + */ +int amdgpu_bo_create(struct amdgpu_device *adev, struct amdgpu_bo_param *bp, struct amdgpu_bo **bo_ptr) { @@ -556,7 +561,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size); - INIT_LIST_HEAD(&bo->shadow_list); bo->vm_bo = NULL; bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain : bp->domain; @@ -579,22 +583,25 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, if (bp->type == ttm_bo_type_kernel) bo->tbo.priority = 1; + if (!bp->destroy) + bp->destroy = &amdgpu_bo_destroy; + r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type, &bo->placement, page_align, &ctx, NULL, - bp->resv, &amdgpu_bo_destroy); + bp->resv, bp->destroy); if (unlikely(r != 0)) return r; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - bo->tbo.mem.mem_type == TTM_PL_VRAM && - bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT) + bo->tbo.resource->mem_type == TTM_PL_VRAM && + bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && - bo->tbo.mem.mem_type == TTM_PL_VRAM) { + bo->tbo.resource->mem_type == TTM_PL_VRAM) { struct dma_fence *fence; r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence); @@ -625,108 +632,68 @@ fail_unreserve: return r; } -static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, - unsigned long size, - struct amdgpu_bo *bo) -{ - struct amdgpu_bo_param bp; - int r; - - if (bo->shadow) - return 0; - - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.domain = AMDGPU_GEM_DOMAIN_GTT; - bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_SHADOW; - bp.type = ttm_bo_type_kernel; - bp.resv = bo->tbo.base.resv; - bp.bo_ptr_size = sizeof(struct amdgpu_bo); - - r = amdgpu_bo_do_create(adev, &bp, &bo->shadow); - if (!r) { - bo->shadow->parent = amdgpu_bo_ref(bo); - mutex_lock(&adev->shadow_list_lock); - list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list); - mutex_unlock(&adev->shadow_list_lock); - } - - return r; -} - /** - * amdgpu_bo_create - create an &amdgpu_bo buffer object + * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object * @adev: amdgpu device object * @bp: parameters to be used for the buffer object - * @bo_ptr: pointer to the buffer object 
pointer + * @ubo_ptr: pointer to the buffer object pointer * - * Creates an &amdgpu_bo buffer object; and if requested, also creates a - * shadow object. - * Shadow object is used to backup the original buffer object, and is always - * in GTT. + * Create a BO to be used by user application; * * Returns: * 0 for success or a negative error code on failure. */ -int amdgpu_bo_create(struct amdgpu_device *adev, - struct amdgpu_bo_param *bp, - struct amdgpu_bo **bo_ptr) + +int amdgpu_bo_create_user(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, + struct amdgpu_bo_user **ubo_ptr) { - u64 flags = bp->flags; + struct amdgpu_bo *bo_ptr; int r; - bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW; - - r = amdgpu_bo_do_create(adev, bp, bo_ptr); + bp->bo_ptr_size = sizeof(struct amdgpu_bo_user); + bp->destroy = &amdgpu_bo_user_destroy; + r = amdgpu_bo_create(adev, bp, &bo_ptr); if (r) return r; - if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) { - if (!bp->resv) - WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv, - NULL)); - - r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr); - - if (!bp->resv) - dma_resv_unlock((*bo_ptr)->tbo.base.resv); - - if (r) - amdgpu_bo_unref(bo_ptr); - } - + *ubo_ptr = to_amdgpu_bo_user(bo_ptr); return r; } /** - * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object + * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object * @adev: amdgpu device object * @bp: parameters to be used for the buffer object - * @ubo_ptr: pointer to the buffer object pointer + * @vmbo_ptr: pointer to the buffer object pointer * - * Create a BO to be used by user application; + * Create a BO to be used by GPUVM. * * Returns: * 0 for success or a negative error code on failure. */ -int amdgpu_bo_create_user(struct amdgpu_device *adev, - struct amdgpu_bo_param *bp, - struct amdgpu_bo_user **ubo_ptr) +int amdgpu_bo_create_vm(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, + struct amdgpu_bo_vm **vmbo_ptr) { struct amdgpu_bo *bo_ptr; int r; - bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW; - bp->bo_ptr_size = sizeof(struct amdgpu_bo_user); - r = amdgpu_bo_do_create(adev, bp, &bo_ptr); + /* bo_ptr_size will be determined by the caller and it depends on + * the number of amdgpu_vm_pt entries. + */ + BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm)); + bp->destroy = &amdgpu_bo_vm_destroy; + r = amdgpu_bo_create(adev, bp, &bo_ptr); if (r) return r; - *ubo_ptr = to_amdgpu_bo_user(bo_ptr); + *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr); + INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list); return r; } + /** * amdgpu_bo_validate - validate an &amdgpu_bo buffer object * @bo: pointer to the buffer object @@ -762,6 +729,22 @@ retry: } /** + * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list + * + * @vmbo: BO that will be inserted into the shadow list + * + * Insert a BO into the shadow list.
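+ *
+ * Shadow BOs are GTT-backed copies of VM page table BOs; keeping them on the
+ * per-device shadow list allows amdgpu_device_recover_vram() to restore the
+ * page table contents (see amdgpu_bo_restore_shadow()) after the VRAM copy
+ * has been lost, e.g. across a GPU reset. The list is protected by
+ * adev->shadow_list_lock.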
+ */ +void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev); + + mutex_lock(&adev->shadow_list_lock); + list_add_tail(&vmbo->shadow_list, &adev->shadow_list); + mutex_unlock(&adev->shadow_list_lock); +} + +/** * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow * * @shadow: &amdgpu_bo shadow to be restored @@ -815,12 +798,12 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false, - MAX_SCHEDULE_TIMEOUT); + r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false, + MAX_SCHEDULE_TIMEOUT); if (r < 0) return r; - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap); + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); if (r) return r; @@ -943,8 +926,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, domain = amdgpu_bo_get_preferred_pin_domain(adev, domain); if (bo->tbo.pin_count) { - uint32_t mem_type = bo->tbo.mem.mem_type; - uint32_t mem_flags = bo->tbo.mem.placement; + uint32_t mem_type = bo->tbo.resource->mem_type; + uint32_t mem_flags = bo->tbo.resource->placement; if (!(domain & amdgpu_mem_type_to_domain(mem_type))) return -EINVAL; @@ -994,7 +977,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, ttm_bo_pin(&bo->tbo); - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); if (domain == AMDGPU_GEM_DOMAIN_VRAM) { atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size); atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo), @@ -1037,14 +1020,22 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) */ void amdgpu_bo_unpin(struct amdgpu_bo *bo) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + ttm_bo_unpin(&bo->tbo); if (bo->tbo.pin_count) return; - amdgpu_bo_subtract_pin_size(bo); - if (bo->tbo.base.import_attach) dma_buf_unpin(bo->tbo.base.import_attach); + + if (bo->tbo.resource->mem_type == TTM_PL_VRAM) { + atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size); + atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo), + &adev->visible_pin_size); + } else if (bo->tbo.resource->mem_type == TTM_PL_TT) { + atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size); + } } /** @@ -1123,10 +1114,6 @@ int amdgpu_bo_init(struct amdgpu_device *adev) void amdgpu_bo_fini(struct amdgpu_device *adev) { amdgpu_ttm_fini(adev); - if (!adev->gmc.xgmi.connected_to_cpu) { - arch_phys_wc_del(adev->gmc.vram_mtrr); - arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); - } } /** @@ -1246,6 +1233,9 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, BUG_ON(bo->tbo.type == ttm_bo_type_kernel); ubo = to_amdgpu_bo_user(bo); + if (metadata_size) + *metadata_size = ubo->metadata_size; + if (buffer) { if (buffer_size < ubo->metadata_size) return -EINVAL; @@ -1254,8 +1244,6 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, memcpy(buffer, ubo->metadata, ubo->metadata_size); } - if (metadata_size) - *metadata_size = ubo->metadata_size; if (flags) *flags = ubo->metadata_flags; @@ -1278,7 +1266,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo; - struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource *old_mem = bo->resource; if (!amdgpu_bo_is_amdgpu_bo(bo)) return; @@ -1289,7 +1277,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, amdgpu_bo_kunmap(abo); if (abo->tbo.base.dma_buf 
&& !abo->tbo.base.import_attach && - bo->mem.mem_type != TTM_PL_SYSTEM) + bo->resource->mem_type != TTM_PL_SYSTEM) dma_buf_move_notify(abo->tbo.base.dma_buf); /* remember the eviction */ @@ -1304,6 +1292,26 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); } +void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, + uint64_t *gtt_mem, uint64_t *cpu_mem) +{ + unsigned int domain; + + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + switch (domain) { + case AMDGPU_GEM_DOMAIN_VRAM: + *vram_mem += amdgpu_bo_size(bo); + break; + case AMDGPU_GEM_DOMAIN_GTT: + *gtt_mem += amdgpu_bo_size(bo); + break; + case AMDGPU_GEM_DOMAIN_CPU: + default: + *cpu_mem += amdgpu_bo_size(bo); + break; + } +} + /** * amdgpu_bo_release_notify - notification about a BO being released * @bo: pointer to a buffer object @@ -1331,7 +1339,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (bo->base.resv == &bo->base._resv) amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo); - if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node || + if (bo->resource->mem_type != TTM_PL_VRAM || !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) return; @@ -1362,18 +1370,17 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); - unsigned long offset, size; + unsigned long offset; int r; /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - if (bo->mem.mem_type != TTM_PL_VRAM) + if (bo->resource->mem_type != TTM_PL_VRAM) return 0; - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) <= adev->gmc.visible_vram_size) + offset = bo->resource->start << PAGE_SHIFT; + if ((offset + bo->base.size) <= adev->gmc.visible_vram_size) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1395,10 +1402,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) else if (unlikely(r)) return VM_FAULT_SIGBUS; - offset = bo->mem.start << PAGE_SHIFT; + offset = bo->resource->start << PAGE_SHIFT; /* this should never happen */ - if (bo->mem.mem_type == TTM_PL_VRAM && - (offset + size) > adev->gmc.visible_vram_size) + if (bo->resource->mem_type == TTM_PL_VRAM && + (offset + bo->base.size) > adev->gmc.visible_vram_size) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); @@ -1482,11 +1489,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr) */ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) { - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM); + WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM); WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) && !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel); - WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET); - WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM && + WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET); + WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM && !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)); return amdgpu_bo_gpu_offset_no_check(bo); @@ -1504,8 +1511,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); uint64_t offset; - offset = (bo->tbo.mem.start << PAGE_SHIFT) + - amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type); + offset = 
(bo->tbo.resource->start << PAGE_SHIFT) + + amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type); return amdgpu_gmc_sign_extend(offset); } @@ -1558,7 +1565,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) unsigned int pin_count; u64 size; - domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: placement = "VRAM"; @@ -1592,7 +1599,6 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS); amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC); amdgpu_bo_print_flag(m, bo, VRAM_CLEARED); - amdgpu_bo_print_flag(m, bo, SHADOW); amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS); amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID); amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 2d1fefbe1e99..38c834d0f930 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -30,6 +30,8 @@ #include <drm/amdgpu_drm.h> #include "amdgpu.h" +#include "amdgpu_res_cursor.h" + #ifdef CONFIG_MMU_NOTIFIER #include <linux/mmu_notifier.h> #endif @@ -37,7 +39,12 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX #define AMDGPU_BO_MAX_PLACEMENTS 3 +/* BO flag to indicate a KFD userptr BO */ +#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63) +#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62) + #define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo) +#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo) struct amdgpu_bo_param { unsigned long size; @@ -48,7 +55,8 @@ struct amdgpu_bo_param { u64 flags; enum ttm_bo_type type; bool no_wait_gpu; - struct dma_resv *resv; + struct dma_resv *resv; + void (*destroy)(struct ttm_buffer_object *bo); }; /* bo virtual addresses in a vm */ @@ -97,16 +105,10 @@ struct amdgpu_bo { struct amdgpu_vm_bo_base *vm_bo; /* Constant after initialization */ struct amdgpu_bo *parent; - struct amdgpu_bo *shadow; - - #ifdef CONFIG_MMU_NOTIFIER struct mmu_interval_notifier notifier; #endif - - struct list_head shadow_list; - struct kgd_mem *kfd_bo; }; @@ -119,6 +121,13 @@ struct amdgpu_bo_user { }; +struct amdgpu_bo_vm { + struct amdgpu_bo bo; + struct amdgpu_bo *shadow; + struct list_head shadow_list; + struct amdgpu_vm_bo_base entries[]; +}; + static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo) { return container_of(tbo, struct amdgpu_bo, tbo); @@ -191,7 +200,7 @@ static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo) static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo) { - return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; + return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; } /** @@ -211,18 +220,19 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; - struct drm_mm_node *node = bo->tbo.mem.mm_node; - unsigned long pages_left; + struct amdgpu_res_cursor cursor; - if (bo->tbo.mem.mem_type != TTM_PL_VRAM) + if (bo->tbo.resource->mem_type != TTM_PL_VRAM) return false; - for (pages_left = bo->tbo.mem.num_pages; pages_left; - pages_left -= node->size, node++) - if (node->start < fpfn) + amdgpu_res_first(bo->tbo.resource, 0, 
amdgpu_bo_size(bo), &cursor); + while (cursor.remaining) { + if (cursor.start < adev->gmc.visible_vram_size) return true; + amdgpu_res_next(&cursor, cursor.size); + } + return false; } @@ -245,6 +255,22 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo) return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED; } +/** + * amdgpu_bo_shadowed - check if the BO is shadowed + * + * @bo: BO to be tested. + * + * Returns: + * NULL if not shadowed or else return a BO pointer. + */ +static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo) +{ + if (bo->tbo.type == ttm_bo_type_kernel) + return to_amdgpu_bo_vm(bo)->shadow; + + return NULL; +} + bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain); @@ -265,6 +291,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, int amdgpu_bo_create_user(struct amdgpu_device *adev, struct amdgpu_bo_param *bp, struct amdgpu_bo_user **ubo_ptr); +int amdgpu_bo_create_vm(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, + struct amdgpu_bo_vm **ubo_ptr); void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); @@ -300,6 +329,9 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); int amdgpu_bo_validate(struct amdgpu_bo *bo); +void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem, + uint64_t *gtt_mem, uint64_t *cpu_mem); +void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo); int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence); uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c new file mode 100644 index 000000000000..d02c8637f909 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright 2016-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König, Felix Kuehling + */ + +#include "amdgpu.h" + +static inline struct amdgpu_preempt_mgr * +to_preempt_mgr(struct ttm_resource_manager *man) +{ + return container_of(man, struct amdgpu_preempt_mgr, manager); +} + +/** + * DOC: mem_info_preempt_used + * + * The amdgpu driver provides a sysfs API for reporting current total amount of + * used preemptible memory. + * The file mem_info_preempt_used is used for this, and returns the current + * used size of the preemptible block, in bytes. + */ +static ssize_t mem_info_preempt_used_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT); + return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man)); +} + +static DEVICE_ATTR_RO(mem_info_preempt_used); + +/** + * amdgpu_preempt_mgr_new - allocate a new node + * + * @man: TTM memory type manager + * @tbo: TTM BO we need this range for + * @place: placement flags and restrictions + * @res: the resulting resource object + * + * Dummy, just count the space used without allocating resources or any limit. + */ +static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man, + struct ttm_buffer_object *tbo, + const struct ttm_place *place, + struct ttm_resource **res) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + + *res = kzalloc(sizeof(**res), GFP_KERNEL); + if (!*res) + return -ENOMEM; + + ttm_resource_init(tbo, place, *res); + (*res)->start = AMDGPU_BO_INVALID_OFFSET; + + atomic64_add((*res)->num_pages, &mgr->used); + return 0; +} + +/** + * amdgpu_preempt_mgr_del - free ranges + * + * @man: TTM memory type manager + * @res: TTM resource object + * + * Free the allocated resource again. + */ +static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man, + struct ttm_resource *res) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + + atomic64_sub(res->num_pages, &mgr->used); + kfree(res); +} + +/** + * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain + * + * @man: TTM memory type manager + * + * Return how many bytes are used in the preemptible domain. + */ +uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + s64 result = atomic64_read(&mgr->used); + + return (result > 0 ? result : 0) * PAGE_SIZE; +} + +/** + * amdgpu_preempt_mgr_debug - dump usage information + * + * @man: TTM memory type manager + * @printer: DRM printer to use + * + * Dump the current usage via the given DRM printer. + */ +static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man, + struct drm_printer *printer) +{ + struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man); + + drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n", + man->size, (u64)atomic64_read(&mgr->used)); +} + +static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = { + .alloc = amdgpu_preempt_mgr_new, + .free = amdgpu_preempt_mgr_del, + .debug = amdgpu_preempt_mgr_debug +}; + +/** + * amdgpu_preempt_mgr_init - init PREEMPT manager + * + * @adev: amdgpu_device pointer + * + * Allocate and initialize the preempt manager.
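+ *
+ * Unlike the GTT manager there is no drm_mm range allocator behind this
+ * manager: allocations only get their usage accounted and their resources
+ * start at AMDGPU_BO_INVALID_OFFSET, so the size passed to
+ * ttm_resource_manager_init() is just a nominal limit. Besides registering
+ * the manager with TTM this also creates the mem_info_preempt_used sysfs
+ * file.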
+ */ +int amdgpu_preempt_mgr_init(struct amdgpu_device *adev) +{ + struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + + man->use_tt = true; + man->func = &amdgpu_preempt_mgr_func; + + ttm_resource_manager_init(man, (1 << 30)); + + atomic64_set(&mgr->used, 0); + + ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used); + if (ret) { + DRM_ERROR("Failed to create device file mem_info_preempt_used\n"); + return ret; + } + + ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, + &mgr->manager); + ttm_resource_manager_set_used(man, true); + return 0; +} + +/** + * amdgpu_preempt_mgr_fini - free and destroy preempt manager + * + * @adev: amdgpu_device pointer + * + * Destroy and free the preempt manager. The teardown is aborted if the + * allocations inside it cannot be evicted. + */ +void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev) +{ + struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + + ttm_resource_manager_set_used(man, false); + + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + if (ret) + return; + + device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used); + + ttm_resource_manager_cleanup(man); + ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a09483beb968..3ec5099ffeb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include <linux/dma-mapping.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_psp.h" @@ -38,6 +39,9 @@ #include "amdgpu_ras.h" #include "amdgpu_securedisplay.h" +#include "amdgpu_atomfirmware.h" + +#include <drm/drm_drv.h> static int psp_sysfs_init(struct amdgpu_device *adev); static void psp_sysfs_fini(struct amdgpu_device *adev); @@ -104,6 +108,7 @@ static int psp_early_init(void *handle) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: psp_v11_0_set_psp_funcs(psp); psp->autoload_supported = true; break; @@ -113,6 +118,10 @@ static int psp_early_init(void *handle) case CHIP_ALDEBARAN: psp_v13_0_set_psp_funcs(psp); break; + case CHIP_YELLOW_CARP: + psp_v13_0_set_psp_funcs(psp); + psp->autoload_supported = true; + break; default: return -EINVAL; } @@ -162,11 +171,81 @@ Err_out: return ret; } +/* + * Helper function to query psp runtime database entry + * + * @adev: amdgpu_device pointer + * @entry_type: the type of psp runtime database entry + * @db_entry: runtime database entry pointer + * + * Return false if runtime database doesn't exist or entry is invalid + * or true if the specific database entry is found and copied to @db_entry + */ +static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, + enum psp_runtime_entry_type entry_type, + void *db_entry) +{ + uint64_t db_header_pos, db_dir_pos; + struct psp_runtime_data_header db_header = {0}; + struct psp_runtime_data_directory db_dir = {0}; + bool ret = false; + int i; + + db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET; + db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header); + + /* read runtime db header from vram */ + amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header, + sizeof(struct psp_runtime_data_header), false); + + if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) { + /* runtime db doesn't exist, exit */ +
dev_warn(adev->dev, "PSP runtime database doesn't exist\n"); + return false; + } + + /* read runtime database entry from vram */ + amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir, + sizeof(struct psp_runtime_data_directory), false); + + if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) { + /* invalid db entry count, exit */ + dev_warn(adev->dev, "Invalid PSP runtime database entry count\n"); + return false; + } + + /* look up for requested entry type */ + for (i = 0; i < db_dir.entry_count && !ret; i++) { + if (db_dir.entry_list[i].entry_type == entry_type) { + switch (entry_type) { + case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG: + if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) { + /* invalid db entry size */ + dev_warn(adev->dev, "Invalid PSP runtime database entry size\n"); + return false; + } + /* read runtime database entry */ + amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset, + (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false); + ret = true; + break; + default: + ret = false; + break; + } + } + } + + return ret; +} + static int psp_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; int ret; + struct psp_runtime_boot_cfg_entry boot_cfg_entry; + struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx; if (!amdgpu_sriov_vf(adev)) { ret = psp_init_microcode(psp); @@ -174,17 +253,47 @@ static int psp_sw_init(void *handle) DRM_ERROR("Failed to load psp firmware!\n"); return ret; } + } else if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_ALDEBARAN) { + ret = psp_init_ta_microcode(psp, "aldebaran"); + if (ret) { + DRM_ERROR("Failed to initialize ta microcode!\n"); + return ret; + } } - ret = psp_memory_training_init(psp); - if (ret) { - DRM_ERROR("Failed to initialize memory training!\n"); - return ret; + memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); + if (psp_get_runtime_db_entry(adev, + PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG, + &boot_cfg_entry)) { + psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask; + if ((psp->boot_cfg_bitmask) & + BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) { + /* If psp runtime database exists, then + * only enable two stage memory training + * when TWO_STAGE_DRAM_TRAINING bit is set + * in runtime database */ + mem_training_ctx->enable_mem_training = true; + } + + } else { + /* If psp runtime database doesn't exist or + * is invalid, force enable two stage memory + * training */ + mem_training_ctx->enable_mem_training = true; } - ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); - if (ret) { - DRM_ERROR("Failed to process memory training!\n"); - return ret; + + if (mem_training_ctx->enable_mem_training) { + ret = psp_memory_training_init(psp); + if (ret) { + DRM_ERROR("Failed to initialize memory training!\n"); + return ret; + } + + ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } } if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) { @@ -229,7 +338,7 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, int i; struct amdgpu_device *adev = psp->adev; - if (psp->adev->in_pci_err_recovery) + if (psp->adev->no_hw_access) return 0; for (i = 0; i < adev->usec_timeout; i++) { @@ -253,12 +362,15 @@ psp_cmd_submit_buf(struct psp_context *psp, struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr) { int ret; - int index; + int index, idx; int timeout 
= 20000; bool ras_intr = false; bool skip_unsupport = false; - if (psp->adev->in_pci_err_recovery) + if (psp->adev->no_hw_access) + return 0; + + if (!drm_dev_enter(&psp->adev->ddev, &idx)) return 0; mutex_lock(&psp->mutex); @@ -271,11 +383,10 @@ psp_cmd_submit_buf(struct psp_context *psp, ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index); if (ret) { atomic_dec(&psp->fence_value); - mutex_unlock(&psp->mutex); - return ret; + goto exit; } - amdgpu_asic_invalidate_hdp(psp->adev, NULL); + amdgpu_device_invalidate_hdp(psp->adev, NULL); while (*((unsigned int *)psp->fence_buf) != index) { if (--timeout == 0) break; @@ -288,7 +399,7 @@ psp_cmd_submit_buf(struct psp_context *psp, if (ras_intr) break; usleep_range(10, 100); - amdgpu_asic_invalidate_hdp(psp->adev, NULL); + amdgpu_device_invalidate_hdp(psp->adev, NULL); } /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */ @@ -312,8 +423,8 @@ psp_cmd_submit_buf(struct psp_context *psp, psp->cmd_buf_mem->cmd_id, psp->cmd_buf_mem->resp.status); if (!timeout) { - mutex_unlock(&psp->mutex); - return -EINVAL; + ret = -EINVAL; + goto exit; } } @@ -321,8 +432,10 @@ psp_cmd_submit_buf(struct psp_context *psp, ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo; ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi; } - mutex_unlock(&psp->mutex); +exit: + mutex_unlock(&psp->mutex); + drm_dev_exit(idx); return ret; } @@ -366,8 +479,7 @@ static int psp_load_toc(struct psp_context *psp, if (!cmd) return -ENOMEM; /* Copy toc to psp firmware private buffer */ - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->toc_start_addr, psp->toc_bin_size); + psp_copy_fw(psp, psp->toc_start_addr, psp->toc_bin_size); psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc_bin_size); @@ -417,31 +529,12 @@ static int psp_tmr_init(struct psp_context *psp) return ret; } -static int psp_clear_vf_fw(struct psp_context *psp) -{ - int ret; - struct psp_gfx_cmd_resp *cmd; - - if (!amdgpu_sriov_vf(psp->adev) || psp->adev->asic_type != CHIP_NAVI12) - return 0; - - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); - if (!cmd) - return -ENOMEM; - - cmd->cmd_id = GFX_CMD_ID_CLEAR_VF_FW; - - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - kfree(cmd); - - return ret; -} - static bool psp_skip_tmr(struct psp_context *psp) { switch (psp->adev->asic_type) { case CHIP_NAVI12: case CHIP_SIENNA_CICHLID: + case CHIP_ALDEBARAN: return true; default: return false; @@ -552,20 +645,43 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp, return ret; } -static int psp_boot_config_set(struct amdgpu_device *adev) +static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) { struct psp_context *psp = &adev->psp; struct psp_gfx_cmd_resp *cmd = psp->cmd; + int ret; - if (adev->asic_type != CHIP_SIENNA_CICHLID || amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) + return 0; + + memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); + + cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; + cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (!ret) { + *boot_cfg = + (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 
1 : 0; + } + + return ret; +} + +static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg) +{ + struct psp_context *psp = &adev->psp; + struct psp_gfx_cmd_resp *cmd = psp->cmd; + + if (amdgpu_sriov_vf(adev)) return 0; memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp)); cmd->cmd_id = GFX_CMD_ID_BOOT_CFG; cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET; - cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC; - cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC; + cmd->cmd.boot_cfg.boot_config = boot_cfg; + cmd->cmd.boot_cfg.boot_config_valid = boot_cfg; return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); } @@ -621,8 +737,7 @@ static int psp_asd_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size); + psp_copy_fw(psp, psp->asd_start_addr, psp->asd_ucode_size); psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_ucode_size); @@ -696,6 +811,8 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg, psp_prep_reg_prog_cmd_buf(cmd, reg, value); ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + if (ret) + DRM_ERROR("PSP failed to program reg id %d", reg); kfree(cmd); return ret; @@ -777,8 +894,7 @@ static int psp_xgmi_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); + psp_copy_fw(psp, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, @@ -1034,8 +1150,14 @@ static int psp_ras_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); + psp_copy_fw(psp, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); + + ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; + + if (psp->adev->gmc.xgmi.connected_to_cpu) + ras_cmd->ras_in_message.init_flags.poison_mode_en = 1; + else + ras_cmd->ras_in_message.init_flags.dgpu_mode = 1; psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, @@ -1046,8 +1168,6 @@ static int psp_ras_load(struct psp_context *psp) ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf; - if (!ret) { psp->ras.session_id = cmd->resp.session_id; @@ -1128,6 +1248,31 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) return ret; } +static int psp_ras_status_to_errno(struct amdgpu_device *adev, + enum ta_ras_status ras_status) +{ + int ret = -EINVAL; + + switch (ras_status) { + case TA_RAS_STATUS__SUCCESS: + ret = 0; + break; + case TA_RAS_STATUS__RESET_NEEDED: + ret = -EAGAIN; + break; + case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE: + dev_warn(adev->dev, "RAS WARN: ras function unavailable\n"); + break; + case TA_RAS_STATUS__ERROR_ASD_READ_WRITE: + dev_warn(adev->dev, "RAS WARN: asd read or write failed\n"); + break; + default: + dev_err(adev->dev, "RAS ERROR: ras function failed ret 0x%X\n", ret); + } + + return ret; +} + int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable) { @@ -1151,7 +1296,7 @@ int psp_ras_enable_features(struct psp_context *psp, if (ret) return -EINVAL; - return ras_cmd->ras_status; + return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status); } static int psp_ras_terminate(struct psp_context *psp) @@ -1184,19 +1329,62 @@ static int psp_ras_terminate(struct 
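psp_ras_status_to_errno() above replaces the old free-form status logging with a mapping into ordinary errno values, which lets callers handle firmware status with the usual kernel idioms (notably -EAGAIN for "reset needed"). A small standalone model of that mapping and how a caller might react; the enum values here are placeholders, not the real ta_ras_status ABI:

#include <errno.h>
#include <stdio.h>

enum ta_status { TA_OK = 0, TA_RESET_NEEDED = 0xA001, TA_NOT_AVAILABLE = 0xA002 };

static int ta_status_to_errno(enum ta_status s)
{
    switch (s) {
    case TA_OK:
        return 0;
    case TA_RESET_NEEDED:
        return -EAGAIN;         /* caller should retry after a GPU reset */
    case TA_NOT_AVAILABLE:      /* warn-and-fail cases collapse to -EINVAL */
    default:
        return -EINVAL;
    }
}

int main(void)
{
    int err = ta_status_to_errno(TA_RESET_NEEDED);

    if (err == -EAGAIN)
        printf("RAS TA asked for a reset before retrying\n");
    return 0;
}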
psp_context *psp) static int psp_ras_initialize(struct psp_context *psp) { int ret; + uint32_t boot_cfg = 0xFF; + struct amdgpu_device *adev = psp->adev; /* * TODO: bypass the initialize in sriov for now */ - if (amdgpu_sriov_vf(psp->adev)) + if (amdgpu_sriov_vf(adev)) return 0; - if (!psp->adev->psp.ta_ras_ucode_size || - !psp->adev->psp.ta_ras_start_addr) { - dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n"); + if (!adev->psp.ta_ras_ucode_size || + !adev->psp.ta_ras_start_addr) { + dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n"); return 0; } + if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) { + /* query GECC enablement status from boot config + * boot_cfg: 1: GECC is enabled or 0: GECC is disabled + */ + ret = psp_boot_config_get(adev, &boot_cfg); + if (ret) + dev_warn(adev->dev, "PSP get boot config failed\n"); + + if (!amdgpu_ras_is_supported(psp->adev, AMDGPU_RAS_BLOCK__UMC)) { + if (!boot_cfg) { + dev_info(adev->dev, "GECC is disabled\n"); + } else { + /* disable GECC in next boot cycle if ras is + * disabled by module parameter amdgpu_ras_enable + * and/or amdgpu_ras_mask, or boot_config_get call + * is failed + */ + ret = psp_boot_config_set(adev, 0); + if (ret) + dev_warn(adev->dev, "PSP set boot config failed\n"); + else + dev_warn(adev->dev, "GECC will be disabled in next boot cycle " + "if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n"); + } + } else { + if (1 == boot_cfg) { + dev_info(adev->dev, "GECC is enabled\n"); + } else { + /* enable GECC in next boot cycle if it is disabled + * in boot config, or force enable GECC if failed to + * get boot configuration + */ + ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC); + if (ret) + dev_warn(adev->dev, "PSP set boot config failed\n"); + else + dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n"); + } + } + } + if (!psp->ras.ras_initialized) { ret = psp_ras_init_shared_buf(psp); if (ret) @@ -1234,7 +1422,7 @@ int psp_ras_trigger_error(struct psp_context *psp, if (amdgpu_ras_intr_triggered()) return 0; - return ras_cmd->ras_status; + return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status); } // ras end @@ -1271,9 +1459,8 @@ static int psp_hdcp_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, - psp->ta_hdcp_ucode_size); + psp_copy_fw(psp, psp->ta_hdcp_start_addr, + psp->ta_hdcp_ucode_size); psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, @@ -1423,8 +1610,7 @@ static int psp_dtm_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); + psp_copy_fw(psp, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, @@ -1569,8 +1755,7 @@ static int psp_rap_load(struct psp_context *psp) if (!cmd) return -ENOMEM; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - memcpy(psp->fw_pri_buf, psp->ta_rap_start_addr, psp->ta_rap_ucode_size); + psp_copy_fw(psp, psp->ta_rap_start_addr, psp->ta_rap_ucode_size); psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, @@ -1920,17 +2105,6 @@ static int psp_hw_start(struct psp_context *psp) return ret; } - ret = psp_clear_vf_fw(psp); - if (ret) { - DRM_ERROR("PSP clear vf fw!\n"); - return ret; - } - - ret = psp_boot_config_set(adev); - if (ret) { - DRM_WARN("PSP set boot config@\n"); - } - ret = psp_tmr_init(psp); if (ret) { DRM_ERROR("PSP tmr init failed!\n"); @@ -2166,12 
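The GECC handling added to psp_ras_initialize() above reduces to a small decision table, evaluated only when the firmware supports dynamic boot config: compare the GECC state reported by the boot config against whether UMC RAS is actually enabled, and reprogram the boot config for the next cycle when the two disagree. A compilable distillation of just that decision (function and enum names are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

enum gecc_action { GECC_LEAVE_AS_IS, GECC_ENABLE_NEXT_BOOT, GECC_DISABLE_NEXT_BOOT };

/* boot_cfg is 1 (GECC on), 0 (GECC off) or 0xFF when the query failed,
 * exactly as the driver initialises it above. */
static enum gecc_action decide_gecc(bool umc_ras_enabled, unsigned int boot_cfg)
{
    if (!umc_ras_enabled)
        return boot_cfg == 0 ? GECC_LEAVE_AS_IS : GECC_DISABLE_NEXT_BOOT;

    return boot_cfg == 1 ? GECC_LEAVE_AS_IS : GECC_ENABLE_NEXT_BOOT;
}

int main(void)
{
    /* UMC RAS masked off by module parameters, GECC currently on */
    printf("%d\n", decide_gecc(false, 1));      /* -> GECC_DISABLE_NEXT_BOOT */
    /* UMC RAS wanted but the boot-config query failed: force enable */
    printf("%d\n", decide_gecc(true, 0xFF));    /* -> GECC_ENABLE_NEXT_BOOT */
    return 0;
}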
+2340,9 @@ static int psp_load_smu_fw(struct psp_context *psp) return 0; if ((amdgpu_in_reset(adev) && - ras && ras->supported && + ras && adev->ras_enabled && (adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_VEGA20)) || - (adev->in_runpm && - adev->asic_type >= CHIP_NAVI10 && - adev->asic_type <= CHIP_NAVI12)) { + adev->asic_type == CHIP_VEGA20))) { ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD); if (ret) { DRM_WARN("Failed to set MP1 state prepare for reload\n"); @@ -2311,11 +2482,20 @@ static int psp_load_fw(struct amdgpu_device *adev) if (!psp->cmd) return -ENOMEM; - ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, - AMDGPU_GEM_DOMAIN_GTT, - &psp->fw_pri_bo, - &psp->fw_pri_mc_addr, - &psp->fw_pri_buf); + if (amdgpu_sriov_vf(adev)) { + ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, + AMDGPU_GEM_DOMAIN_VRAM, + &psp->fw_pri_bo, + &psp->fw_pri_mc_addr, + &psp->fw_pri_buf); + } else { + ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, + AMDGPU_GEM_DOMAIN_GTT, + &psp->fw_pri_bo, + &psp->fw_pri_mc_addr, + &psp->fw_pri_buf); + } + if (ret) goto failed; @@ -2434,7 +2614,6 @@ static int psp_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - int ret; if (psp->adev->psp.ta_fw) { psp_ras_terminate(psp); @@ -2445,11 +2624,6 @@ static int psp_hw_fini(void *handle) } psp_asd_unload(psp); - ret = psp_clear_vf_fw(psp); - if (ret) { - DRM_ERROR("PSP clear vf fw!\n"); - return ret; - } psp_tmr_terminate(psp); psp_ring_destroy(psp, PSP_RING_TYPE__KM); @@ -2539,10 +2713,12 @@ static int psp_resume(void *handle) DRM_INFO("PSP is resuming...\n"); - ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); - if (ret) { - DRM_ERROR("Failed to process memory training!\n"); - return ret; + if (psp->mem_train_ctx.enable_mem_training) { + ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } } mutex_lock(&adev->firmware.mutex); @@ -2694,7 +2870,7 @@ int psp_ring_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; - amdgpu_asic_flush_hdp(adev, NULL); + amdgpu_device_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; @@ -2726,7 +2902,7 @@ int psp_init_asd_microcode(struct psp_context *psp, asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data; adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version); - adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version); + adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version); adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes); adev->psp.asd_start_addr = (uint8_t *)asd_hdr + le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes); @@ -2762,7 +2938,7 @@ int psp_init_toc_microcode(struct psp_context *psp, toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data; adev->psp.toc_fw_version = le32_to_cpu(toc_hdr->header.ucode_version); - adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->ucode_feature_version); + adev->psp.toc_feature_version = le32_to_cpu(toc_hdr->sos.fw_version); adev->psp.toc_bin_size = le32_to_cpu(toc_hdr->header.ucode_size_bytes); adev->psp.toc_start_addr = (uint8_t *)toc_hdr + le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes); @@ 
-2774,6 +2950,50 @@ out: return err; } +static int psp_init_sos_base_fw(struct amdgpu_device *adev) +{ + const struct psp_firmware_header_v1_0 *sos_hdr; + const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; + uint8_t *ucode_array_start_addr; + + sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; + ucode_array_start_addr = (uint8_t *)sos_hdr + + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); + + if (adev->gmc.xgmi.connected_to_cpu || (adev->asic_type != CHIP_ALDEBARAN)) { + adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); + adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->sos.fw_version); + + adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos.offset_bytes); + adev->psp.sys_start_addr = ucode_array_start_addr; + + adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos.size_bytes); + adev->psp.sos_start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr->sos.offset_bytes); + } else { + /* Load alternate PSP SOS FW */ + sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; + + adev->psp.sos_fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); + adev->psp.sos_feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version); + + adev->psp.sys_bin_size = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes); + adev->psp.sys_start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes); + + adev->psp.sos_bin_size = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes); + adev->psp.sos_start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes); + } + + if ((adev->psp.sys_bin_size == 0) || (adev->psp.sos_bin_size == 0)) { + dev_warn(adev->dev, "PSP SOS FW not available"); + return -EINVAL; + } + + return 0; +} + int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) { @@ -2784,6 +3004,7 @@ int psp_init_sos_microcode(struct psp_context *psp, const struct psp_firmware_header_v1_2 *sos_hdr_v1_2; const struct psp_firmware_header_v1_3 *sos_hdr_v1_3; int err = 0; + uint8_t *ucode_array_start_addr; if (!chip_name) { dev_err(adev->dev, "invalid chip name for sos microcode\n"); @@ -2800,47 +3021,45 @@ int psp_init_sos_microcode(struct psp_context *psp, goto out; sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data; + ucode_array_start_addr = (uint8_t *)sos_hdr + + le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); amdgpu_ucode_print_psp_hdr(&sos_hdr->header); switch (sos_hdr->header.header_version_major) { case 1: - adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version); - adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version); - adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes); - adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes); - adev->psp.sys_start_addr = (uint8_t *)sos_hdr + - le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes); - adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr->sos_offset_bytes); + err = psp_init_sos_base_fw(adev); + if (err) + goto out; + if (sos_hdr->header.header_version_minor == 1) { sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data; - adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes); + adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes); adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes); - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes); + 
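psp_init_sos_base_fw() above centralises the choice between the regular sys_drv/sos images and the *_aux pair, which is taken only for Aldebaran parts that are not connected to the CPU over XGMI, and then sanity-checks that both selected images are non-empty. A rough standalone model of that selection; the header struct here is trimmed to what the sketch needs and does not reproduce the real psp_firmware_header layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_bin_desc { uint32_t fw_version; uint32_t offset_bytes; uint32_t size_bytes; };

struct sos_hdr {                              /* trimmed model of the v1_3 header */
    struct fw_bin_desc sys_drv, sos;          /* primary pair */
    struct fw_bin_desc sys_drv_aux, sos_aux;  /* alternate pair */
};

static bool pick_sos_images(const struct sos_hdr *hdr, bool use_aux,
                            const struct fw_bin_desc **sys,
                            const struct fw_bin_desc **sos)
{
    *sys = use_aux ? &hdr->sys_drv_aux : &hdr->sys_drv;
    *sos = use_aux ? &hdr->sos_aux : &hdr->sos;

    /* same "both images must be non-empty" check as the driver */
    return (*sys)->size_bytes != 0 && (*sos)->size_bytes != 0;
}

int main(void)
{
    struct sos_hdr hdr = {
        .sys_drv = { .fw_version = 1, .offset_bytes = 0x000, .size_bytes = 0x100 },
        .sos     = { .fw_version = 1, .offset_bytes = 0x100, .size_bytes = 0x200 },
    };
    const struct fw_bin_desc *sys, *sos;

    /* aux images left zero-sized: selecting them must fail the check */
    printf("primary ok: %d\n", pick_sos_images(&hdr, false, &sys, &sos));
    printf("aux ok:     %d\n", pick_sos_images(&hdr, true,  &sys, &sos));
    return 0;
}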
le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes); + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes); adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes); + le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes); } if (sos_hdr->header.header_version_minor == 2) { sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data; - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes); + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes); adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes); + le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes); } if (sos_hdr->header.header_version_minor == 3) { sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data; - adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc_size_bytes); - adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_3->v1_1.toc_offset_bytes); - adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_size_bytes); - adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_3->v1_1.kdb_offset_bytes); - adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes); - adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes); - adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl_size_bytes); - adev->psp.rl_start_addr = (uint8_t *)adev->psp.sys_start_addr + - le32_to_cpu(sos_hdr_v1_3->rl_offset_bytes); + adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes); + adev->psp.toc_start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes); + adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes); + adev->psp.kdb_start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes); + adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes); + adev->psp.spl_start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes); + adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes); + adev->psp.rl_start_addr = ucode_array_start_addr + + le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes); } break; default: @@ -3018,7 +3237,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); void *cpu_addr; dma_addr_t dma_addr; - int ret; + int ret, idx; char fw_name[100]; const struct firmware *usbc_pd_fw; @@ -3027,6 +3246,9 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, return -EBUSY; } + if (!drm_dev_enter(ddev, &idx)) + return -ENODEV; + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf); ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev); if (ret) @@ -3058,16 +3280,29 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev, rel_buf: dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr); release_firmware(usbc_pd_fw); - fail: if (ret) { DRM_ERROR("Failed to load USBC PD FW, err = %d", ret); - return ret; + count = ret; } + drm_dev_exit(idx); return count; } +void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size) +{ + int idx; + + if (!drm_dev_enter(&psp->adev->ddev, &idx)) + return; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, start_addr, bin_size); + + drm_dev_exit(idx); +} + static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR, psp_usbc_pd_fw_sysfs_read, psp_usbc_pd_fw_sysfs_write); diff 
--git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 60aa99a39a74..3030ec24eb3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -225,6 +225,61 @@ struct psp_memory_training_context { enum psp_memory_training_init_flag init; u32 training_cnt; + bool enable_mem_training; +}; + +/** PSP runtime DB **/ +#define PSP_RUNTIME_DB_SIZE_IN_BYTES 0x10000 +#define PSP_RUNTIME_DB_OFFSET 0x100000 +#define PSP_RUNTIME_DB_COOKIE_ID 0x0ed5 +#define PSP_RUNTIME_DB_VER_1 0x0100 +#define PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT 0x40 + +enum psp_runtime_entry_type { + PSP_RUNTIME_ENTRY_TYPE_INVALID = 0x0, + PSP_RUNTIME_ENTRY_TYPE_TEST = 0x1, + PSP_RUNTIME_ENTRY_TYPE_MGPU_COMMON = 0x2, /* Common mGPU runtime data */ + PSP_RUNTIME_ENTRY_TYPE_MGPU_WAFL = 0x3, /* WAFL runtime data */ + PSP_RUNTIME_ENTRY_TYPE_MGPU_XGMI = 0x4, /* XGMI runtime data */ + PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG = 0x5, /* Boot Config runtime data */ +}; + +/* PSP runtime DB header */ +struct psp_runtime_data_header { + /* determine the existence of runtime db */ + uint16_t cookie; + /* version of runtime db */ + uint16_t version; +}; + +/* PSP runtime DB entry */ +struct psp_runtime_entry { + /* type of runtime db entry */ + uint32_t entry_type; + /* offset of entry in bytes */ + uint16_t offset; + /* size of entry in bytes */ + uint16_t size; +}; + +/* PSP runtime DB directory */ +struct psp_runtime_data_directory { + /* number of valid entries */ + uint16_t entry_count; + /* db entries*/ + struct psp_runtime_entry entry_list[PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT]; +}; + +/* PSP runtime DB boot config feature bitmask */ +enum psp_runtime_boot_cfg_feature { + BOOT_CFG_FEATURE_GECC = 0x1, + BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING = 0x2, +}; + +/* PSP runtime DB boot config entry */ +struct psp_runtime_boot_cfg_entry { + uint32_t boot_cfg_bitmask; + uint32_t reserved; }; struct psp_context @@ -325,6 +380,8 @@ struct psp_context struct psp_securedisplay_context securedisplay_context; struct mutex mutex; struct psp_memory_training_context mem_train_ctx; + + uint32_t boot_cfg_bitmask; }; struct amdgpu_psp_funcs { @@ -424,4 +481,6 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp, int psp_load_fw_list(struct psp_context *psp, struct amdgpu_firmware_info **ucode_list, int ucode_count); +void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index b0d2fc9454ca..c13b02caf8c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -27,12 +27,14 @@ #include <linux/uaccess.h> #include <linux/reboot.h> #include <linux/syscalls.h> +#include <linux/pm_runtime.h> #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" #include "amdgpu_xgmi.h" #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" +#include "atom.h" static const char *RAS_FS_NAME = "ras"; @@ -320,11 +322,14 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * "disable" requires only the block. * "enable" requires the block and error type. * "inject" requires the block, error type, address, and value. + * * The block is one of: umc, sdma, gfx, etc. * see ras_block_string[] for details + * * The error type is one of: ue, ce, where, * ue is multi-uncorrectable * ce is single-correctable + * * The sub-block is a the sub-block index, pass 0 if there is no sub-block. 
* The address and value are hexadecimal numbers, leading 0x is optional. * @@ -531,7 +536,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; - if (!adev->ras_features || !con) + if (!adev->ras_enabled || !con) return NULL; if (head->block >= AMDGPU_RAS_BLOCK_COUNT) @@ -558,7 +563,7 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, struct ras_manager *obj; int i; - if (!adev->ras_features || !con) + if (!adev->ras_enabled || !con) return NULL; if (head) { @@ -585,36 +590,11 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, } /* obj end */ -static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev, - const char* invoke_type, - const char* block_name, - enum ta_ras_status ret) -{ - switch (ret) { - case TA_RAS_STATUS__SUCCESS: - return; - case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE: - dev_warn(adev->dev, - "RAS WARN: %s %s currently unavailable\n", - invoke_type, - block_name); - break; - default: - dev_err(adev->dev, - "RAS ERROR: %s %s error failed ret 0x%X\n", - invoke_type, - block_name, - ret); - } -} - /* feature ctl begin */ static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, - struct ras_common_if *head) + struct ras_common_if *head) { - struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - - return con->hw_supported & BIT(head->block); + return adev->ras_hw_enabled & BIT(head->block); } static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, @@ -658,11 +638,7 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, con->features |= BIT(head->block); } else { if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { - /* skip clean gfx ras context feature for VEGA20 Gaming. - * will clean later - */ - if (!(!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX))) - con->features &= ~BIT(head->block); + con->features &= ~BIT(head->block); put_obj(obj); } } @@ -708,15 +684,10 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, if (!amdgpu_ras_intr_triggered()) { ret = psp_ras_enable_features(&adev->psp, info, enable); if (ret) { - amdgpu_ras_parse_status_code(adev, - enable ? "enable":"disable", - ras_block_str(head->block), - (enum ta_ras_status)ret); - if (ret == TA_RAS_STATUS__RESET_NEEDED) - ret = -EAGAIN; - else - ret = -EINVAL; - + dev_err(adev->dev, "ras %s %s failed %d\n", + enable ? 
"enable":"disable", + ras_block_str(head->block), + ret); goto out; } } @@ -770,6 +741,10 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, con->features |= BIT(head->block); ret = amdgpu_ras_feature_enable(adev, head, 0); + + /* clean gfx block ras features flag */ + if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX) + con->features &= ~BIT(head->block); } } else ret = amdgpu_ras_feature_enable(adev, head, enable); @@ -890,6 +865,11 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->gmc.xgmi.ras_funcs->query_ras_error_count) adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data); break; + case AMDGPU_RAS_BLOCK__HDP: + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->query_ras_error_count) + adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data); + break; default: break; } @@ -901,17 +881,42 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, info->ce_count = obj->err_data.ce_count; if (err_data.ce_count) { - dev_info(adev->dev, "%ld correctable hardware errors " + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { + dev_info(adev->dev, "socket: %d, die: %d " + "%ld correctable hardware errors " "detected in %s block, no user " "action is needed.\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), obj->err_data.ce_count, ras_block_str(info->head.block)); + } else { + dev_info(adev->dev, "%ld correctable hardware errors " + "detected in %s block, no user " + "action is needed.\n", + obj->err_data.ce_count, + ras_block_str(info->head.block)); + } } if (err_data.ue_count) { - dev_info(adev->dev, "%ld uncorrectable hardware errors " + if (adev->smuio.funcs && + adev->smuio.funcs->get_socket_id && + adev->smuio.funcs->get_die_id) { + dev_info(adev->dev, "socket: %d, die: %d " + "%ld uncorrectable hardware errors " "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), obj->err_data.ue_count, ras_block_str(info->head.block)); + } else { + dev_info(adev->dev, "%ld uncorrectable hardware errors " + "detected in %s block\n", + obj->err_data.ue_count, + ras_block_str(info->head.block)); + } } return 0; @@ -937,11 +942,20 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, if (adev->mmhub.ras_funcs && adev->mmhub.ras_funcs->reset_ras_error_count) adev->mmhub.ras_funcs->reset_ras_error_count(adev); + + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_status) + adev->mmhub.ras_funcs->reset_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__SDMA: if (adev->sdma.funcs->reset_ras_error_count) adev->sdma.funcs->reset_ras_error_count(adev); break; + case AMDGPU_RAS_BLOCK__HDP: + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->reset_ras_error_count) + adev->hdp.ras_funcs->reset_ras_error_count(adev); + break; default: break; } @@ -1022,38 +1036,44 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, ret = -EINVAL; } - amdgpu_ras_parse_status_code(adev, - "inject", - ras_block_str(info->head.block), - (enum ta_ras_status)ret); + if (ret) + dev_err(adev->dev, "ras inject %s failed %d\n", + ras_block_str(info->head.block), ret); return ret; } /* get the total error counts on all IPs */ -unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, - bool is_ce) +void amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager 
*obj; - struct ras_err_data data = {0, 0}; + unsigned long ce, ue; - if (!adev->ras_features || !con) - return 0; + if (!adev->ras_enabled || !con) + return; + ce = 0; + ue = 0; list_for_each_entry(obj, &con->head, node) { struct ras_query_if info = { .head = obj->head, }; if (amdgpu_ras_query_error_status(adev, &info)) - return 0; + return; - data.ce_count += info.ce_count; - data.ue_count += info.ue_count; + ce += info.ce_count; + ue += info.ue_count; } - return is_ce ? data.ce_count : data.ue_count; + if (ce_count) + *ce_count = ce; + + if (ue_count) + *ue_count = ue; } /* query/inject/cure end */ @@ -1265,8 +1285,8 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct dentry *dir; - struct drm_minor *minor = adev_to_drm(adev)->primary; + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *dir; dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root); debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev, @@ -1275,6 +1295,8 @@ static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device * &amdgpu_ras_debugfs_eeprom_ops); debugfs_create_u32("bad_page_cnt_threshold", 0444, dir, &con->bad_page_cnt_threshold); + debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled); + debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled); /* * After one uncorrectable error happens, usually GPU recovery will @@ -1561,7 +1583,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; - if (!adev->ras_features || !con) + if (!adev->ras_enabled || !con) return; list_for_each_entry(obj, &con->head, node) { @@ -1611,7 +1633,7 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; - if (!adev->ras_features || !con) + if (!adev->ras_enabled || !con) return; list_for_each_entry(obj, &con->head, node) { @@ -1925,7 +1947,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) bool exc_err_limit = false; int ret; - if (adev->ras_features && con) + if (adev->ras_enabled && con) data = &con->eh_data; else return 0; @@ -1962,6 +1984,9 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) ret = amdgpu_ras_load_bad_pages(adev); if (ret) goto free; + + if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num) + adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs); } return 0; @@ -2029,6 +2054,25 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) } /* + * this is workaround for vega20 workstation sku, + * force enable gfx ras, ignore vbios gfx ras flag + * due to GC EDC can not write + */ +static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) +{ + struct atom_context *ctx = adev->mode_info.atom_context; + + if (!ctx) + return; + + if (strnstr(ctx->vbios_version, "D16406", + sizeof(ctx->vbios_version)) || + strnstr(ctx->vbios_version, "D36002", + sizeof(ctx->vbios_version))) + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); +} + +/* * check hardware's ras ability which will be saved in hw_supported. * if hardware does not support ras, we can skip some ras initializtion and * forbid some ras operations from IP. 
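The reworked amdgpu_ras_query_error_count() above no longer returns one of the two totals based on a bool; it walks the managed blocks once and reports both correctable and uncorrectable totals through optional out-pointers, which is what the new fdinfo and delayed-work consumers want. A compact userspace model of the new shape, with the per-block query faked by a static table:

#include <stddef.h>
#include <stdio.h>

struct ras_counts { unsigned long ce, ue; };

static void query_error_count(const struct ras_counts *blocks, size_t n,
                              unsigned long *ce_count, unsigned long *ue_count)
{
    unsigned long ce = 0, ue = 0;
    size_t i;

    /* one pass over every managed block, mirroring the list walk above */
    for (i = 0; i < n; i++) {
        ce += blocks[i].ce;
        ue += blocks[i].ue;
    }

    /* both totals are optional, exactly like the new out-pointers */
    if (ce_count)
        *ce_count = ce;
    if (ue_count)
        *ue_count = ue;
}

int main(void)
{
    const struct ras_counts blocks[] = { { 2, 0 }, { 1, 1 }, { 0, 0 } };
    unsigned long ce, ue;

    query_error_count(blocks, sizeof(blocks) / sizeof(blocks[0]), &ce, &ue);
    printf("correctable %lu, uncorrectable %lu\n", ce, ue);
    return 0;
}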
@@ -2037,11 +2081,9 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) * we have to initialize ras as normal. but need check if operation is * allowed or not in each function. */ -static void amdgpu_ras_check_supported(struct amdgpu_device *adev, - uint32_t *hw_supported, uint32_t *supported) +static void amdgpu_ras_check_supported(struct amdgpu_device *adev) { - *hw_supported = 0; - *supported = 0; + adev->ras_hw_enabled = adev->ras_enabled = 0; if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw || !amdgpu_ras_asic_supported(adev)) @@ -2050,33 +2092,58 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev, if (!adev->gmc.xgmi.connected_to_cpu) { if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { dev_info(adev->dev, "MEM ECC is active.\n"); - *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC | - 1 << AMDGPU_RAS_BLOCK__DF); + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); } else { dev_info(adev->dev, "MEM ECC is not presented.\n"); } if (amdgpu_atomfirmware_sram_ecc_supported(adev)) { dev_info(adev->dev, "SRAM ECC is active.\n"); - *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC | - 1 << AMDGPU_RAS_BLOCK__DF); + adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | + 1 << AMDGPU_RAS_BLOCK__DF); } else { dev_info(adev->dev, "SRAM ECC is not presented.\n"); } } else { /* driver only manages a few IP blocks RAS feature * when GPU is connected cpu through XGMI */ - *hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX | - 1 << AMDGPU_RAS_BLOCK__SDMA | - 1 << AMDGPU_RAS_BLOCK__MMHUB); + adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | + 1 << AMDGPU_RAS_BLOCK__SDMA | + 1 << AMDGPU_RAS_BLOCK__MMHUB); } + amdgpu_ras_get_quirks(adev); + /* hw_supported needs to be aligned with RAS block mask. */ - *hw_supported &= AMDGPU_RAS_BLOCK_MASK; + adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; + + adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : + adev->ras_hw_enabled & amdgpu_ras_mask; +} + +static void amdgpu_ras_counte_dw(struct work_struct *work) +{ + struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, + ras_counte_delay_work.work); + struct amdgpu_device *adev = con->adev; + struct drm_device *dev = adev_to_drm(adev); + unsigned long ce_count, ue_count; + int res; + + res = pm_runtime_get_sync(dev->dev); + if (res < 0) + goto Out; - *supported = amdgpu_ras_enable == 0 ? - 0 : *hw_supported & amdgpu_ras_mask; - adev->ras_features = *supported; + /* Cache new values. + */ + amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + + pm_runtime_mark_last_busy(dev->dev); +Out: + pm_runtime_put_autosuspend(dev->dev); } int amdgpu_ras_init(struct amdgpu_device *adev) @@ -2093,17 +2160,22 @@ int amdgpu_ras_init(struct amdgpu_device *adev) if (!con) return -ENOMEM; + con->adev = adev; + INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); + atomic_set(&con->ras_ce_count, 0); + atomic_set(&con->ras_ue_count, 0); + con->objs = (struct ras_manager *)(con + 1); amdgpu_ras_set_context(adev, con); - amdgpu_ras_check_supported(adev, &con->hw_supported, - &con->supported); - if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) { + amdgpu_ras_check_supported(adev); + + if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { /* set gfx block ras context feature for VEGA20 Gaming * send ras disable cmd to ras ta during ras late init. 
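The old con->hw_supported / con->supported pair becomes adev->ras_hw_enabled and adev->ras_enabled above: the first is what the hardware and vbios (plus the D16406/D36002 quirk) advertise, clamped to the known block mask, and the second is that capability filtered through the amdgpu_ras_enable and amdgpu_ras_mask module parameters. The relationship fits in a few lines; bit positions and the block mask below are placeholders:

#include <stdint.h>
#include <stdio.h>

#define RAS_BLOCK_MASK 0x3ffu   /* placeholder for AMDGPU_RAS_BLOCK_MASK */

static uint32_t ras_enabled(uint32_t ras_hw_enabled, int ras_enable_param,
                            uint32_t ras_mask_param)
{
    /* hw_supported needs to be aligned with the RAS block mask */
    ras_hw_enabled &= RAS_BLOCK_MASK;

    /* amdgpu_ras_enable == 0 turns everything off; otherwise apply the mask */
    return ras_enable_param == 0 ? 0 : ras_hw_enabled & ras_mask_param;
}

int main(void)
{
    /* hardware reports two blocks (bits 0 and 5 here), user masks one off */
    printf("0x%x\n", ras_enabled(0x21, -1, 0x01));      /* -> 0x1 */
    /* amdgpu_ras_enable=0 wins regardless of hardware capability */
    printf("0x%x\n", ras_enabled(0x21, 0, 0xffffffff));  /* -> 0x0 */
    return 0;
}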
*/ - if (!adev->ras_features && adev->asic_type == CHIP_VEGA20) { + if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); return 0; @@ -2153,8 +2225,9 @@ int amdgpu_ras_init(struct amdgpu_device *adev) } dev_info(adev->dev, "RAS INFO: ras initialized successfully, " - "hardware ability[%x] ras_mask[%x]\n", - con->hw_supported, con->supported); + "hardware ability[%x] ras_mask[%x]\n", + adev->ras_hw_enabled, adev->ras_enabled); + return 0; release_con: amdgpu_ras_set_context(adev, NULL); @@ -2163,7 +2236,7 @@ release_con: return r; } -static int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) +int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) { if (adev->gmc.xgmi.connected_to_cpu) return 1; @@ -2195,6 +2268,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, struct ras_fs_if *fs_info, struct ras_ih_if *ih_info) { + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + unsigned long ue_count, ce_count; int r; /* disable RAS feature per IP block if it is not supported */ @@ -2235,6 +2310,12 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, if (r) goto sysfs; + /* Those are the cached values at init. + */ + amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + return 0; cleanup: amdgpu_ras_sysfs_remove(adev, ras_block); @@ -2268,7 +2349,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj, *tmp; - if (!adev->ras_features || !con) { + if (!adev->ras_enabled || !con) { /* clean ras context for VEGA20 Gaming after send ras disable cmd */ amdgpu_release_ras_context(adev); @@ -2314,7 +2395,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - if (!adev->ras_features || !con) + if (!adev->ras_enabled || !con) return; amdgpu_ras_disable_all_features(adev, 0); @@ -2328,9 +2409,10 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - if (!adev->ras_features || !con) + if (!adev->ras_enabled || !con) return 0; + /* Need disable ras on all IPs here before ip [hw/sw]fini */ amdgpu_ras_disable_all_features(adev, 0); amdgpu_ras_recovery_fini(adev); @@ -2341,7 +2423,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - if (!adev->ras_features || !con) + if (!adev->ras_enabled || !con) return 0; amdgpu_ras_fs_fini(adev); @@ -2352,6 +2434,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) if (con->features) amdgpu_ras_disable_all_features(adev, 1); + cancel_delayed_work_sync(&con->ras_counte_delay_work); + amdgpu_ras_set_context(adev, NULL); kfree(con); @@ -2360,10 +2444,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { - uint32_t hw_supported, supported; - - amdgpu_ras_check_supported(adev, &hw_supported, &supported); - if (!hw_supported) + amdgpu_ras_check_supported(adev); + if (!adev->ras_hw_enabled) return; if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { @@ -2392,7 +2474,7 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev) if (!con) return; - if (!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { + if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); amdgpu_ras_set_context(adev, 
NULL); kfree(con); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 60df268a0c66..256cea5d34f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -313,9 +313,6 @@ struct ras_common_if { struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. */ - uint32_t hw_supported; - /* for IP to check its ras ability. */ - uint32_t supported; uint32_t features; struct list_head head; /* sysfs */ @@ -343,6 +340,11 @@ struct amdgpu_ras { /* disable ras error count harvest in recovery */ bool disable_ras_err_cnt_harvest; + + /* RAS count errors delayed work */ + struct delayed_work ras_counte_delay_work; + atomic_t ras_ue_count; + atomic_t ras_ce_count; }; struct ras_fs_data { @@ -478,7 +480,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, if (block >= AMDGPU_RAS_BLOCK_COUNT) return 0; - return ras && (ras->supported & (1 << block)); + return ras && (adev->ras_enabled & (1 << block)); } int amdgpu_ras_recovery_init(struct amdgpu_device *adev); @@ -488,8 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); -unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, - bool is_ce); +void amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count); /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, @@ -628,4 +631,7 @@ void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready); bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev); void amdgpu_release_ras_context(struct amdgpu_device *adev); + +int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 40f2adf305bc..59e0fefb15aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -28,6 +28,7 @@ #include <drm/drm_mm.h> #include <drm/ttm/ttm_resource.h> +#include <drm/ttm/ttm_range_manager.h> /* state back for walking over vram_mgr and gtt_mgr allocations */ struct amdgpu_res_cursor { @@ -53,7 +54,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, { struct drm_mm_node *node; - if (!res || !res->mm_node) { + if (!res) { cur->start = start; cur->size = size; cur->remaining = size; @@ -63,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, BUG_ON(start + size > res->num_pages << PAGE_SHIFT); - node = res->mm_node; + node = to_ttm_range_mgr_node(res)->mm_nodes; while (start >= node->size << PAGE_SHIFT) start -= node++->size << PAGE_SHIFT; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 688624ebe421..7b634a1517f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -158,6 +158,7 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) * @irq_src: interrupt source to use for this ring * @irq_type: interrupt type to use for this ring * @hw_prio: ring priority (NORMAL/HIGH) + * @sched_score: optional score atomic shared with other schedulers * * Initialize the driver information for the selected ring (all asics). * Returns 0 on success, error on failure. 
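With the TTM rework in this series, amdgpu_res_first() above no longer dereferences res->mm_node directly but walks the drm_mm_node array embedded in ttm_range_mgr_node, skipping whole nodes until the requested byte offset falls inside one. A self-contained model of that walk over a plain array (simplified: the cursor here stores the offset within the node rather than a GPU address):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct node { uint64_t size_pages; };            /* stand-in for drm_mm_node */
struct cursor { uint64_t start, size, remaining; const struct node *node; };

static void res_first(const struct node *nodes, uint64_t start, uint64_t size,
                      struct cursor *cur)
{
    const struct node *node = nodes;

    /* skip whole nodes until the offset lands inside one */
    while (start >= node->size_pages << PAGE_SHIFT)
        start -= node++->size_pages << PAGE_SHIFT;

    cur->start = start;                           /* offset inside this node */
    cur->size = (node->size_pages << PAGE_SHIFT) - start;
    if (cur->size > size)
        cur->size = size;
    cur->remaining = size;
    cur->node = node;
}

int main(void)
{
    const struct node nodes[] = { { 4 }, { 8 }, { 2 } };   /* pages per node */
    struct cursor cur;

    res_first(nodes, 5ull << PAGE_SHIFT, 2ull << PAGE_SHIFT, &cur);
    printf("node #%ld, offset 0x%llx, size 0x%llx\n",
           (long)(cur.node - nodes), (unsigned long long)cur.start,
           (unsigned long long)cur.size);
    return 0;
}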
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index ca1622835296..e7d3d0dbdd96 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -107,7 +107,8 @@ struct amdgpu_fence_driver { }; int amdgpu_fence_driver_init(struct amdgpu_device *adev); -void amdgpu_fence_driver_fini(struct amdgpu_device *adev); +void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev); +void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index 4fc2ce8ce8ab..7a4775ab6804 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -127,8 +127,8 @@ struct amdgpu_rlc_funcs { void (*reset)(struct amdgpu_device *adev); void (*start)(struct amdgpu_device *adev); void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid); - void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag); - u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag); + void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip); + u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip); bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c index 5369c8dd0764..123453999093 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -86,6 +86,8 @@ void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedispla (*cmd)->cmd_id = command_id; } +#if defined(CONFIG_DEBUG_FS) + static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { @@ -162,6 +164,8 @@ static const struct file_operations amdgpu_securedisplay_debugfs_ops = { .llseek = default_llseek }; +#endif + void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h index b860ec913ac5..484bb3dcec47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h @@ -29,6 +29,7 @@ struct amdgpu_smuio_funcs { void (*update_rom_clock_gating)(struct amdgpu_device *adev, bool enable); void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags); u32 (*get_die_id)(struct amdgpu_device *adev); + u32 (*get_socket_id)(struct amdgpu_device *adev); bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 4e558632a5d2..1b2ceccaf5b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -210,10 +210,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, return -EINVAL; /* always sync to the exclusive fence */ - f = dma_resv_get_excl(resv); + f = dma_resv_excl_fence(resv); r = amdgpu_sync_fence(sync, f); - flist = dma_resv_get_list(resv); + flist = dma_resv_shared_list(resv); if (!flist || r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 
792d20261846..0527772fe1b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create, TP_fast_assign( __entry->bo = bo; - __entry->pages = bo->tbo.mem.num_pages; - __entry->type = bo->tbo.mem.mem_type; + __entry->pages = bo->tbo.resource->num_pages; + __entry->type = bo->tbo.resource->mem_type; __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; __entry->visible = bo->flags; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 61c4fb1b87fe..cdfc20b0b2eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -32,7 +32,6 @@ #include <linux/dma-mapping.h> #include <linux/iommu.h> -#include <linux/hmm.h> #include <linux/pagemap.h> #include <linux/sched/task.h> #include <linux/sched/mm.h> @@ -46,6 +45,7 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_range_manager.h> #include <drm/amdgpu_drm.h> @@ -112,7 +112,22 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } abo = ttm_to_amdgpu_bo(bo); - switch (bo->mem.mem_type) { + if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) { + struct dma_fence *fence; + struct dma_resv *resv = &bo->base._resv; + + rcu_read_lock(); + fence = rcu_dereference(resv->fence_excl); + if (fence && !fence->ops->signaled) + dma_fence_enable_sw_signaling(fence); + + placement->num_placement = 0; + placement->num_busy_placement = 0; + rcu_read_unlock(); + return; + } + + switch (bo->resource->mem_type) { case AMDGPU_PL_GDS: case AMDGPU_PL_GWS: case AMDGPU_PL_OA: @@ -145,6 +160,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } break; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: default: amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); break; @@ -153,32 +169,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } /** - * amdgpu_verify_access - Verify access for a mmap call - * - * @bo: The buffer object to map - * @filp: The file pointer from the process performing the mmap - * - * This is called by ttm_bo_mmap() to verify whether a process - * has the right to mmap a BO to their process space. - */ -static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) -{ - struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); - - /* - * Don't verify access for KFD BOs. They don't have a GEM - * object associated with them. 
- */ - if (abo->kfd_bo) - return 0; - - if (amdgpu_ttm_tt_get_usermm(bo->ttm)) - return -EPERM; - return drm_vma_node_verify_access(&abo->tbo.base.vma_node, - filp->private_data); -} - -/** * amdgpu_ttm_map_buffer - Map memory into the GART windows * @bo: buffer object to map * @mem: memory object to map @@ -211,6 +201,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); + BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT); /* Map only what can't be accessed directly */ if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) { @@ -288,7 +279,7 @@ error_free: } /** - * amdgpu_copy_ttm_mem_to_mem - Helper function for copy + * amdgpu_ttm_copy_mem_to_mem - Helper function for copy * @adev: amdgpu device * @src: buffer/address where to read from * @dst: buffer/address where to write to @@ -471,10 +462,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, { struct amdgpu_device *adev; struct amdgpu_bo *abo; - struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource *old_mem = bo->resource; int r; - if (new_mem->mem_type == TTM_PL_TT) { + if (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT) { r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); if (r) return r; @@ -492,18 +484,20 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, goto out; } if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_TT) { + (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT)) { ttm_bo_move_null(bo, new_mem); goto out; } - if (old_mem->mem_type == TTM_PL_TT && + if ((old_mem->mem_type == TTM_PL_TT || + old_mem->mem_type == AMDGPU_PL_PREEMPT) && new_mem->mem_type == TTM_PL_SYSTEM) { r = ttm_bo_wait_ctx(bo, ctx); if (r) return r; amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); + ttm_resource_free(bo, &bo->resource); ttm_bo_assign_mem(bo, new_mem); goto out; } @@ -570,10 +564,10 @@ out: * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() */ -static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) +static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, + struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct drm_mm_node *mm_node = mem->mm_node; size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; switch (mem->mem_type) { @@ -581,18 +575,16 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resourc /* system memory */ return 0; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) return -EINVAL; - /* Only physically contiguous buffers apply. In a contiguous - * buffer, size of the first mm_node would match the number of - * pages in ttm_resource. 
- */ + if (adev->mman.aper_base_kaddr && - (mm_node->size == mem->num_pages)) + mem->placement & TTM_PL_FLAG_CONTIGUOUS) mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr + mem->bus.offset; @@ -615,7 +607,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_res_cursor cursor; - amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor); + amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0, + &cursor); return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; } @@ -670,10 +663,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) struct amdgpu_ttm_tt *gtt = (void *)ttm; unsigned long start = gtt->userptr; struct vm_area_struct *vma; - struct hmm_range *range; - unsigned long timeout; struct mm_struct *mm; - unsigned long i; + bool readonly; int r = 0; mm = bo->notifier.mm; @@ -689,25 +680,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) if (!mmget_not_zero(mm)) /* Happens during process shutdown */ return -ESRCH; - range = kzalloc(sizeof(*range), GFP_KERNEL); - if (unlikely(!range)) { - r = -ENOMEM; - goto out; - } - range->notifier = &bo->notifier; - range->start = bo->notifier.interval_tree.start; - range->end = bo->notifier.interval_tree.last + 1; - range->default_flags = HMM_PFN_REQ_FAULT; - if (!amdgpu_ttm_tt_is_readonly(ttm)) - range->default_flags |= HMM_PFN_REQ_WRITE; - - range->hmm_pfns = kvmalloc_array(ttm->num_pages, - sizeof(*range->hmm_pfns), GFP_KERNEL); - if (unlikely(!range->hmm_pfns)) { - r = -ENOMEM; - goto out_free_ranges; - } - mmap_read_lock(mm); vma = vma_lookup(mm, start); if (unlikely(!vma)) { @@ -719,46 +691,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) r = -EPERM; goto out_unlock; } - mmap_read_unlock(mm); - timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); - -retry: - range->notifier_seq = mmu_interval_read_begin(&bo->notifier); - - mmap_read_lock(mm); - r = hmm_range_fault(range); - mmap_read_unlock(mm); - if (unlikely(r)) { - /* - * FIXME: This timeout should encompass the retry from - * mmu_interval_read_retry() as well. - */ - if (r == -EBUSY && !time_after(jiffies, timeout)) - goto retry; - goto out_free_pfns; - } - - /* - * Due to default_flags, all pages are HMM_PFN_VALID or - * hmm_range_fault() fails. FIXME: The pages cannot be touched outside - * the notifier_lock, and mmu_interval_read_retry() must be done first. - */ - for (i = 0; i < ttm->num_pages; i++) - pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]); - - gtt->range = range; - mmput(mm); - - return 0; + readonly = amdgpu_ttm_tt_is_readonly(ttm); + r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start, + ttm->num_pages, >t->range, readonly, + true); out_unlock: mmap_read_unlock(mm); -out_free_pfns: - kvfree(range->hmm_pfns); -out_free_ranges: - kfree(range); -out: mmput(mm); + return r; } @@ -787,10 +728,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) * FIXME: Must always hold notifier_lock for this, and must * not ignore the return code. 
*/ - r = mmu_interval_read_retry(gtt->range->notifier, - gtt->range->notifier_seq); - kvfree(gtt->range->hmm_pfns); - kfree(gtt->range); + r = amdgpu_hmm_range_get_pages_done(gtt->range); gtt->range = NULL; } @@ -903,7 +841,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, uint64_t page_idx = 1; r = amdgpu_gart_bind(adev, gtt->offset, page_idx, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); if (r) goto gart_bind_fail; @@ -917,11 +855,10 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, r = amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT), ttm->num_pages - page_idx, - &ttm->pages[page_idx], &(gtt->ttm.dma_address[page_idx]), flags); } else { r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); } gart_bind_fail: @@ -959,7 +896,23 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, DRM_ERROR("failed to pin userptr\n"); return r; } + } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) { + if (!ttm->sg) { + struct dma_buf_attachment *attach; + struct sg_table *sgt; + + attach = gtt->gobj->import_attach; + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + ttm->sg = sgt; + } + + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); } + if (!ttm->num_pages) { WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); @@ -981,7 +934,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, /* bind pages into GART page tables */ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); + gtt->ttm.dma_address, flags); if (r) DRM_ERROR("failed to bind %u pages at 0x%08llX\n", @@ -1003,51 +956,50 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; - struct ttm_resource tmp; struct ttm_placement placement; struct ttm_place placements; + struct ttm_resource *tmp; uint64_t addr, flags; int r; - if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) + if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET) return 0; addr = amdgpu_gmc_agp_addr(bo); if (addr != AMDGPU_BO_INVALID_OFFSET) { - bo->mem.start = addr >> PAGE_SHIFT; - } else { + bo->resource->start = addr >> PAGE_SHIFT; + return 0; + } - /* allocate GART space */ - tmp = bo->mem; - tmp.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; - placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; - placements.mem_type = TTM_PL_TT; - placements.flags = bo->mem.placement; - - r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); - if (unlikely(r)) - return r; + /* allocate GART space */ + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; + placements.mem_type = TTM_PL_TT; + placements.flags = bo->resource->placement; - /* compute PTE flags for this buffer object */ - flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); + r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); + if (unlikely(r)) + return r; - /* Bind pages */ - gtt->offset = (u64)tmp.start << 
PAGE_SHIFT; - r = amdgpu_ttm_gart_bind(adev, bo, flags); - if (unlikely(r)) { - ttm_resource_free(bo, &tmp); - return r; - } + /* compute PTE flags for this buffer object */ + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp); - ttm_resource_free(bo, &bo->mem); - bo->mem = tmp; + /* Bind pages */ + gtt->offset = (u64)tmp->start << PAGE_SHIFT; + r = amdgpu_ttm_gart_bind(adev, bo, flags); + if (unlikely(r)) { + ttm_resource_free(bo, &tmp); + return r; } + amdgpu_gart_invalidate_tlb(adev); + ttm_resource_free(bo, &bo->resource); + ttm_bo_assign_mem(bo, tmp); + return 0; } @@ -1066,7 +1018,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) if (!tbo->ttm) return 0; - flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); + flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); r = amdgpu_ttm_gart_bind(adev, tbo, flags); return r; @@ -1086,8 +1038,15 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, int r; /* if the pages have userptr pinning then clear that first */ - if (gtt->userptr) + if (gtt->userptr) { amdgpu_ttm_tt_unpin_userptr(bdev, ttm); + } else if (ttm->sg && gtt->gobj->import_attach) { + struct dma_buf_attachment *attach; + + attach = gtt->gobj->import_attach; + dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); + ttm->sg = NULL; + } if (!gtt->bound) return; @@ -1169,28 +1128,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) return -ENOMEM; - - ttm->page_flags |= TTM_PAGE_FLAG_SG; return 0; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) { - if (!ttm->sg) { - struct dma_buf_attachment *attach; - struct sg_table *sgt; - - attach = gtt->gobj->import_attach; - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); - - ttm->sg = sgt; - } - - drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, - ttm->num_pages); + if (ttm->page_flags & TTM_PAGE_FLAG_SG) return 0; - } return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); } @@ -1211,16 +1153,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, amdgpu_ttm_tt_set_user_pages(ttm, NULL); kfree(ttm->sg); ttm->sg = NULL; - ttm->page_flags &= ~TTM_PAGE_FLAG_SG; - return; - } - - if (ttm->sg && gtt->gobj->import_attach) { - struct dma_buf_attachment *attach; - - attach = gtt->gobj->import_attach; - dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); - ttm->sg = NULL; return; } @@ -1254,6 +1186,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, return -ENOMEM; } + /* Set TTM_PAGE_FLAG_SG before populate but after create. */ + bo->ttm->page_flags |= TTM_PAGE_FLAG_SG; + gtt = (void *)bo->ttm; gtt->userptr = addr; gtt->userflags = flags; @@ -1347,7 +1282,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) if (mem && mem->mem_type != TTM_PL_SYSTEM) flags |= AMDGPU_PTE_VALID; - if (mem && mem->mem_type == TTM_PL_TT) { + if (mem && (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_PREEMPT)) { flags |= AMDGPU_PTE_SYSTEM; if (ttm->caching == ttm_cached) @@ -1396,12 +1332,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { - unsigned long num_pages = bo->mem.num_pages; + unsigned long num_pages = bo->resource->num_pages; struct amdgpu_res_cursor cursor; struct dma_resv_list *flist; struct dma_fence *f; int i; + /* Swapout? 
*/ + if (bo->resource->mem_type == TTM_PL_SYSTEM) + return true; + if (bo->type == ttm_bo_type_kernel && !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) return false; @@ -1410,7 +1350,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_get_list(bo->base.resv); + flist = dma_resv_shared_list(bo->base.resv); if (flist) { for (i = 0; i < flist->shared_count; ++i) { f = rcu_dereference_protected(flist->shared[i], @@ -1420,7 +1360,16 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, } } - switch (bo->mem.mem_type) { + switch (bo->resource->mem_type) { + case AMDGPU_PL_PREEMPT: + /* Preemptible BOs don't own system resources managed by the + * driver (pages, VRAM, GART space). They point to resources + * owned by someone else (e.g. pageable memory in user mode + * or a DMABuf). They are used in a preemptible context so we + * can guarantee no deadlocks and good QoS in case of MMU + * notifiers or DMABuf move notifiers from the resource owner. + */ + return false; case TTM_PL_TT: if (amdgpu_bo_is_amdgpu_bo(bo) && amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) @@ -1429,7 +1378,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, case TTM_PL_VRAM: /* Check each drm MM node individually */ - amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT, + amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT, &cursor); while (cursor.remaining) { if (place->fpfn < PFN_DOWN(cursor.start + cursor.size) @@ -1471,10 +1420,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, uint32_t value = 0; int ret = 0; - if (bo->mem.mem_type != TTM_PL_VRAM) + if (bo->resource->mem_type != TTM_PL_VRAM) return -EIO; - amdgpu_res_first(&bo->mem, offset, len, &cursor); + amdgpu_res_first(bo->resource, offset, len, &cursor); while (cursor.remaining) { uint64_t aligned_pos = cursor.start & ~(uint64_t)3; uint64_t bytes = 4 - (cursor.start & 3); @@ -1530,7 +1479,6 @@ static struct ttm_device_funcs amdgpu_bo_driver = { .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, .move = &amdgpu_bo_move, - .verify_access = &amdgpu_verify_access, .delete_mem_notify = &amdgpu_bo_delete_mem_notify, .release_notify = &amdgpu_bo_release_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, @@ -1633,11 +1581,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) bool mem_train_support = false; if (!amdgpu_sriov_vf(adev)) { - ret = amdgpu_mem_train_support(adev); - if (ret == 1) + if (amdgpu_atomfirmware_mem_training_supported(adev)) mem_train_support = true; - else if (ret == -1) - return -EINVAL; else DRM_DEBUG("memory training does not support!\n"); } @@ -1779,6 +1724,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) NULL); if (r) return r; + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset, + adev->mman.stolen_reserved_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_reserved_memory, + NULL); + if (r) + return r; DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); @@ -1805,6 +1757,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned)(gtt_size / (1024 * 1024))); + /* Initialize preemptible memory pool */ + r = amdgpu_preempt_mgr_init(adev); + if (r) { + DRM_ERROR("Failed initializing PREEMPT heap.\n"); + return r; + } + /* Initialize 
various on-chip memory pools */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); if (r) { @@ -1841,14 +1800,14 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); /* return the IP Discovery TMR memory back to VRAM */ amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + if (adev->mman.stolen_reserved_size) + amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, + NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); - if (adev->mman.aper_base_kaddr) - iounmap(adev->mman.aper_base_kaddr); - adev->mman.aper_base_kaddr = NULL; - amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); + amdgpu_preempt_mgr_fini(adev); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); @@ -1905,50 +1864,6 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) adev->mman.buffer_funcs_enabled = enable; } -static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf) -{ - struct ttm_buffer_object *bo = vmf->vma->vm_private_data; - vm_fault_t ret; - - ret = ttm_bo_vm_reserve(bo, vmf); - if (ret) - return ret; - - ret = amdgpu_bo_fault_reserve_notify(bo); - if (ret) - goto unlock; - - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT, 1); - if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) - return ret; - -unlock: - dma_resv_unlock(bo->base.resv); - return ret; -} - -static const struct vm_operations_struct amdgpu_ttm_vm_ops = { - .fault = amdgpu_ttm_fault, - .open = ttm_bo_vm_open, - .close = ttm_bo_vm_close, - .access = ttm_bo_vm_access -}; - -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv = filp->private_data; - struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev); - int r; - - r = ttm_bo_mmap(filp, vma, &adev->mman.bdev); - if (unlikely(r != 0)) - return r; - - vma->vm_ops = &amdgpu_ttm_vm_ops; - return 0; -} - int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, uint64_t dst_offset, uint32_t byte_count, struct dma_resv *resv, @@ -2043,16 +1958,21 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return -EINVAL; } - if (bo->tbo.mem.mem_type == TTM_PL_TT) { + if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) { + DRM_ERROR("Trying to clear preemptible memory.\n"); + return -EINVAL; + } + + if (bo->tbo.resource->mem_type == TTM_PL_TT) { r = amdgpu_ttm_alloc_gart(&bo->tbo); if (r) return r; } - num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT; + num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT; num_loops = 0; - amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor); + amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); while (cursor.remaining) { num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes); amdgpu_res_next(&cursor, cursor.size); @@ -2077,12 +1997,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, } } - amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor); + amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor); while (cursor.remaining) { uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes); uint64_t dst_addr = cursor.start; - dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type); + dst_addr += amdgpu_ttm_domain_start(adev, + bo->tbo.resource->mem_type); amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr, cur_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 9e38475e0f8d..e69f3e8e06e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -31,17 +31,13 @@ #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) +#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2 #define AMDGPU_POISON 0xd0bed0be -struct amdgpu_vram_reservation { - struct list_head node; - struct drm_mm_node mm_node; -}; - struct amdgpu_vram_mgr { struct ttm_resource_manager manager; struct drm_mm mm; @@ -59,6 +55,11 @@ struct amdgpu_gtt_mgr { atomic64_t available; }; +struct amdgpu_preempt_mgr { + struct ttm_resource_manager manager; + atomic64_t used; +}; + struct amdgpu_mman { struct ttm_device bdev; bool initialized; @@ -75,6 +76,7 @@ struct amdgpu_mman { struct amdgpu_vram_mgr vram_mgr; struct amdgpu_gtt_mgr gtt_mgr; + struct amdgpu_preempt_mgr preempt_mgr; uint64_t stolen_vga_size; struct amdgpu_bo *stolen_vga_memory; @@ -82,6 +84,10 @@ struct amdgpu_mman { struct amdgpu_bo *stolen_extended_memory; bool keep_stolen_vga_memory; + struct amdgpu_bo *stolen_reserved_memory; + uint64_t stolen_reserved_offset; + uint64_t stolen_reserved_size; + /* discovery */ uint8_t *discovery_bin; uint32_t discovery_tmr_size; @@ -102,6 +108,8 @@ struct amdgpu_copy_mem { int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size); void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev); +int amdgpu_preempt_mgr_init(struct amdgpu_device *adev); +void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev); int amdgpu_vram_mgr_init(struct amdgpu_device *adev); void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); @@ -109,6 +117,8 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man); int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man); +uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man); + u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo); int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, struct ttm_resource *mem, @@ -147,7 +157,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, struct dma_resv *resv, struct dma_fence **fence); -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma); int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 9733224117e3..2834981f8c08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -257,36 +257,36 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr) container_of(hdr, struct psp_firmware_header_v1_0, header); DRM_DEBUG("ucode_feature_version: %u\n", - le32_to_cpu(psp_hdr->ucode_feature_version)); + le32_to_cpu(psp_hdr->sos.fw_version)); DRM_DEBUG("sos_offset_bytes: %u\n", - le32_to_cpu(psp_hdr->sos_offset_bytes)); + le32_to_cpu(psp_hdr->sos.offset_bytes)); DRM_DEBUG("sos_size_bytes: %u\n", - le32_to_cpu(psp_hdr->sos_size_bytes)); + le32_to_cpu(psp_hdr->sos.size_bytes)); if (version_minor == 1) { const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 = container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0); DRM_DEBUG("toc_header_version: %u\n", - 
le32_to_cpu(psp_hdr_v1_1->toc_header_version)); + le32_to_cpu(psp_hdr_v1_1->toc.fw_version)); DRM_DEBUG("toc_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes)); + le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes)); DRM_DEBUG("toc_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->toc_size_bytes)); + le32_to_cpu(psp_hdr_v1_1->toc.size_bytes)); DRM_DEBUG("kdb_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_1->kdb_header_version)); + le32_to_cpu(psp_hdr_v1_1->kdb.fw_version)); DRM_DEBUG("kdb_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->kdb_offset_bytes)); + le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes)); DRM_DEBUG("kdb_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes)); + le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes)); } if (version_minor == 2) { const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 = container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0); DRM_DEBUG("kdb_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_2->kdb_header_version)); + le32_to_cpu(psp_hdr_v1_2->kdb.fw_version)); DRM_DEBUG("kdb_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes)); + le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes)); DRM_DEBUG("kdb_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes)); + le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes)); } if (version_minor == 3) { const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 = @@ -294,23 +294,23 @@ void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr) const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 = container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1); DRM_DEBUG("toc_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_3->v1_1.toc_header_version)); + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version)); DRM_DEBUG("toc_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_3->v1_1.toc_offset_bytes)); + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes)); DRM_DEBUG("toc_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_3->v1_1.toc_size_bytes)); + le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes)); DRM_DEBUG("kdb_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_header_version)); + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version)); DRM_DEBUG("kdb_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_offset_bytes)); + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes)); DRM_DEBUG("kdb_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_3->v1_1.kdb_size_bytes)); + le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes)); DRM_DEBUG("spl_header_version: %u\n", - le32_to_cpu(psp_hdr_v1_3->spl_header_version)); + le32_to_cpu(psp_hdr_v1_3->spl.fw_version)); DRM_DEBUG("spl_offset_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_3->spl_offset_bytes)); + le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes)); DRM_DEBUG("spl_size_bytes: %u\n", - le32_to_cpu(psp_hdr_v1_3->spl_size_bytes)); + le32_to_cpu(psp_hdr_v1_3->spl.size_bytes)); } } else { DRM_ERROR("Unknown PSP ucode version: %u.%u\n", @@ -403,6 +403,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: case CHIP_ALDEBARAN: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 2c42874f7784..270309e7f5f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -71,43 +71,39 @@ struct smc_firmware_header_v2_1 { uint32_t pptable_entry_offset; }; +struct psp_fw_bin_desc { + uint32_t fw_version; + uint32_t offset_bytes; + uint32_t size_bytes; 
+}; + /* version_major=1, version_minor=0 */ struct psp_firmware_header_v1_0 { struct common_firmware_header header; - uint32_t ucode_feature_version; - uint32_t sos_offset_bytes; - uint32_t sos_size_bytes; + struct psp_fw_bin_desc sos; }; /* version_major=1, version_minor=1 */ struct psp_firmware_header_v1_1 { struct psp_firmware_header_v1_0 v1_0; - uint32_t toc_header_version; - uint32_t toc_offset_bytes; - uint32_t toc_size_bytes; - uint32_t kdb_header_version; - uint32_t kdb_offset_bytes; - uint32_t kdb_size_bytes; + struct psp_fw_bin_desc toc; + struct psp_fw_bin_desc kdb; }; /* version_major=1, version_minor=2 */ struct psp_firmware_header_v1_2 { struct psp_firmware_header_v1_0 v1_0; - uint32_t reserve[3]; - uint32_t kdb_header_version; - uint32_t kdb_offset_bytes; - uint32_t kdb_size_bytes; + struct psp_fw_bin_desc res; + struct psp_fw_bin_desc kdb; }; /* version_major=1, version_minor=3 */ struct psp_firmware_header_v1_3 { struct psp_firmware_header_v1_1 v1_1; - uint32_t spl_header_version; - uint32_t spl_offset_bytes; - uint32_t spl_size_bytes; - uint32_t rl_header_version; - uint32_t rl_offset_bytes; - uint32_t rl_size_bytes; + struct psp_fw_bin_desc spl; + struct psp_fw_bin_desc rl; + struct psp_fw_bin_desc sys_drv_aux; + struct psp_fw_bin_desc sos_aux; }; /* version_major=1, version_minor=0 */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index ea6f99be070b..f4489773715e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -94,6 +94,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); if (adev->umc.ras_funcs && @@ -131,6 +132,9 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, amdgpu_ras_add_bad_pages(adev, err_data->err_addr, err_data->err_addr_cnt); amdgpu_ras_save_bad_pages(adev); + + if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num) + adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.num_recs); } amdgpu_ras_reset_gpu(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index bbcccf53080d..e5a75fb788dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -22,6 +22,11 @@ #define __AMDGPU_UMC_H__ /* + * (addr / 256) * 4096, the higher 26 bits in ErrorAddr + * is the index of 4KB block + */ +#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4) +/* * (addr / 256) * 8192, the higher 26 bits in ErrorAddr * is the index of 8KB block */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index c6dbc0801604..0f576f294d8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -32,6 +32,7 @@ #include <linux/module.h> #include <drm/drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -375,7 +376,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i, j; + int i, j, idx; bool in_ras_intr = amdgpu_ras_intr_triggered(); cancel_delayed_work_sync(&adev->uvd.idle_work); @@ -403,11 +404,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.inst[j].saved_bo) return -ENOMEM; - /* re-write 0 since err_event_athub will corrupt VCPU buffer */ - if 
(in_ras_intr) - memset(adev->uvd.inst[j].saved_bo, 0, size); - else - memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + if (drm_dev_enter(&adev->ddev, &idx)) { + /* re-write 0 since err_event_athub will corrupt VCPU buffer */ + if (in_ras_intr) + memset(adev->uvd.inst[j].saved_bo, 0, size); + else + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + + drm_dev_exit(idx); + } } if (in_ras_intr) @@ -420,7 +425,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; for (i = 0; i < adev->uvd.num_uvd_inst; i++) { if (adev->uvd.harvest_config & (1 << i)) @@ -432,7 +437,10 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) ptr = adev->uvd.inst[i].cpu_addr; if (adev->uvd.inst[i].saved_bo != NULL) { - memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); + if (drm_dev_enter(&adev->ddev, &idx)) { + memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); + drm_dev_exit(idx); + } kvfree(adev->uvd.inst[i].saved_bo); adev->uvd.inst[i].saved_bo = NULL; } else { @@ -442,8 +450,11 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); + if (drm_dev_enter(&adev->ddev, &idx)) { + memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + drm_dev_exit(idx); + } size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); } @@ -829,9 +840,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, default: DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); - return -EINVAL; } - BUG(); + return -EINVAL; } @@ -1115,9 +1125,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, - true, false, - msecs_to_jiffies(10)); + r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false, + msecs_to_jiffies(10)); if (r == 0) r = -ETIMEDOUT; if (r < 0) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index ea6a62f67e38..1ae7f824adc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <drm/drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -87,7 +88,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, bool direct, struct dma_fence **fence); /** - * amdgpu_vce_init - allocate memory, load vce firmware + * amdgpu_vce_sw_init - allocate memory, load vce firmware * * @adev: amdgpu_device pointer * @size: size for the new BO @@ -204,7 +205,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) } /** - * amdgpu_vce_fini - free memory + * amdgpu_vce_sw_fini - free memory * * @adev: amdgpu_device pointer * @@ -293,7 +294,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) void *cpu_addr; const struct common_firmware_header *hdr; unsigned offset; - int r; + int r, idx; if (adev->vce.vcpu_bo == NULL) return -EINVAL; @@ -313,8 +314,12 @@ int amdgpu_vce_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vce.fw->data; offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(cpu_addr, adev->vce.fw->data + offset, - adev->vce.fw->size - 
offset); + + if (drm_dev_enter(&adev->ddev, &idx)) { + memcpy_toio(cpu_addr, adev->vce.fw->data + offset, + adev->vce.fw->size - offset); + drm_dev_exit(idx); + } amdgpu_bo_kunmap(adev->vce.vcpu_bo); @@ -574,7 +579,7 @@ err: } /** - * amdgpu_vce_cs_validate_bo - make sure not to cross 4GB boundary + * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary * * @p: parser context * @ib_idx: indirect buffer to use @@ -715,7 +720,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, } /** - * amdgpu_vce_cs_parse - parse and validate the command stream + * amdgpu_vce_ring_parse_cs - parse and validate the command stream * * @p: parser context * @ib_idx: indirect buffer to use @@ -951,7 +956,7 @@ out: } /** - * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode + * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode * * @p: parser context * @ib_idx: indirect buffer to use diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 201645963ba5..6780df0fb265 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -27,6 +27,7 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/pci.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_pm.h" @@ -48,6 +49,8 @@ #define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin" #define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin" #define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin" +#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin" +#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin" MODULE_FIRMWARE(FIRMWARE_RAVEN); MODULE_FIRMWARE(FIRMWARE_PICASSO); @@ -63,6 +66,8 @@ MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID); MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER); MODULE_FIRMWARE(FIRMWARE_VANGOGH); MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH); +MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY); +MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP); static void amdgpu_vcn_idle_work_handler(struct work_struct *work); @@ -151,6 +156,18 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; break; + case CHIP_BEIGE_GOBY: + fw_name = FIRMWARE_BEIGE_GOBY; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; + case CHIP_YELLOW_CARP: + fw_name = FIRMWARE_YELLOW_CARP; + if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) && + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) + adev->vcn.indirect_sram = true; + break; default: return -EINVAL; } @@ -271,11 +288,34 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) return 0; } +bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) +{ + bool ret = false; + + int major; + int minor; + int revision; + + /* if cannot find IP data, then this VCN does not exist */ + if (amdgpu_discovery_get_vcn_version(adev, vcn_instance, &major, &minor, &revision) != 0) + return true; + + if ((type == VCN_ENCODE_RING) && (revision & VCN_BLOCK_ENCODE_DISABLE_MASK)) { + ret = true; + } else if ((type == VCN_DECODE_RING) && (revision & VCN_BLOCK_DECODE_DISABLE_MASK)) { + ret = true; + } else if ((type == VCN_UNIFIED_RING) && (revision & VCN_BLOCK_QUEUE_DISABLE_MASK)) { + ret = true; + } + + return ret; +} + int amdgpu_vcn_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; cancel_delayed_work_sync(&adev->vcn.idle_work); @@ -292,7 +332,10 @@ int 
amdgpu_vcn_suspend(struct amdgpu_device *adev) if (!adev->vcn.inst[i].saved_bo) return -ENOMEM; - memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + if (drm_dev_enter(&adev->ddev, &idx)) { + memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size); + drm_dev_exit(idx); + } } return 0; } @@ -301,7 +344,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, idx; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -313,7 +356,10 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) ptr = adev->vcn.inst[i].cpu_addr; if (adev->vcn.inst[i].saved_bo != NULL) { - memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); + if (drm_dev_enter(&adev->ddev, &idx)) { + memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size); + drm_dev_exit(idx); + } kvfree(adev->vcn.inst[i].saved_bo); adev->vcn.inst[i].saved_bo = NULL; } else { @@ -323,8 +369,11 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->vcn.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); + if (drm_dev_enter(&adev->ddev, &idx)) { + memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + drm_dev_exit(idx); + } size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); } @@ -367,7 +416,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) } if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) { - amdgpu_gfx_off_ctrl(adev, true); amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, @@ -387,7 +435,6 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) atomic_inc(&adev->vcn.total_submission_cnt); if (!cancel_delayed_work_sync(&adev->vcn.idle_work)) { - amdgpu_gfx_off_ctrl(adev, false); r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO, true); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index bc76cab67697..d74c62b49795 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -280,6 +280,16 @@ struct amdgpu_vcn_decode_buffer { uint32_t pad[30]; }; +#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80 +#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40 +#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0 + +enum vcn_ring_type { + VCN_ENCODE_RING, + VCN_DECODE_RING, + VCN_UNIFIED_RING, +}; + int amdgpu_vcn_sw_init(struct amdgpu_device *adev); int amdgpu_vcn_sw_fini(struct amdgpu_device *adev); int amdgpu_vcn_suspend(struct amdgpu_device *adev); @@ -287,6 +297,9 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev); void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring); +bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, + enum vcn_ring_type type, uint32_t vcn_instance); + int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout); int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 0c9c5255aa42..b71dd1deeb2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -50,9 +50,12 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev) struct drm_device *ddev = adev_to_drm(adev); /* enable virtual display */ - if (adev->mode_info.num_crtc == 0) - adev->mode_info.num_crtc = 1; - adev->enable_virtual_display = true; + if (adev->asic_type != CHIP_ALDEBARAN && + adev->asic_type != CHIP_ARCTURUS) { + if (adev->mode_info.num_crtc == 0) + adev->mode_info.num_crtc = 1; + adev->enable_virtual_display = true; + } ddev->driver_features &= ~DRIVER_ATOMIC; adev->cg_flags = 0; adev->pg_flags = 0; @@ -429,6 +432,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) uint32_t checksum; uint32_t checkval; + uint32_t i; + uint32_t tmp; + if (adev->virt.fw_reserve.p_pf2vf == NULL) return -EINVAL; @@ -469,6 +475,29 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->virt.reg_access = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all; + adev->virt.decode_max_dimension_pixels = 0; + adev->virt.decode_max_frame_pixels = 0; + adev->virt.encode_max_dimension_pixels = 0; + adev->virt.encode_max_frame_pixels = 0; + adev->virt.is_mm_bw_enabled = false; + for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) { + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels; + adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels); + + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels; + adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels); + + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels; + adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels); + + tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels; + adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels); + } + if((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0)) + adev->virt.is_mm_bw_enabled = true; + + adev->unique_id = + ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; break; default: DRM_ERROR("invalid pf2vf version\n"); @@ -679,6 +708,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) case CHIP_VEGA10: case CHIP_VEGA20: case CHIP_ARCTURUS: + case CHIP_ALDEBARAN: soc15_set_virt_ops(adev); break; case CHIP_NAVI10: @@ -740,3 +770,35 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad return mode; } + +void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, + struct amdgpu_video_codec_info *encode, uint32_t encode_array_size, + struct amdgpu_video_codec_info *decode, uint32_t decode_array_size) +{ + uint32_t i; + + if (!adev->virt.is_mm_bw_enabled) + return; + + if (encode) { + for (i = 0; i < encode_array_size; i++) { + encode[i].max_width = adev->virt.encode_max_dimension_pixels; + encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels; + if (encode[i].max_width > 0) + encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width; + else + encode[i].max_height = 0; + } + } + + if (decode) { + for (i = 0; i < decode_array_size; i++) { + decode[i].max_width = adev->virt.decode_max_dimension_pixels; + decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels; + if (decode[i].max_width > 0) + decode[i].max_height = decode[i].max_pixels_per_frame / 
decode[i].max_width; + else + decode[i].max_height = 0; + } + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 383d4bdc3fb5..8d4c20bb71c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -233,8 +233,17 @@ struct amdgpu_virt { /* vf2pf message */ struct delayed_work vf2pf_work; uint32_t vf2pf_update_interval_ms; + + /* multimedia bandwidth config */ + bool is_mm_bw_enabled; + uint32_t decode_max_dimension_pixels; + uint32_t decode_max_frame_pixels; + uint32_t encode_max_dimension_pixels; + uint32_t encode_max_frame_pixels; }; +struct amdgpu_video_codec_info; + #define amdgpu_sriov_enabled(adev) \ ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV) @@ -307,4 +316,8 @@ int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev); void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev); enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev); + +void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, + struct amdgpu_video_codec_info *encode, uint32_t encode_array_size, + struct amdgpu_video_codec_info *decode, uint32_t decode_array_size); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9acee4a5b2ba..79cfa2d68487 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -25,18 +25,22 @@ * Alex Deucher * Jerome Glisse */ + #include <linux/dma-fence-array.h> #include <linux/interval_tree_generic.h> #include <linux/idr.h> #include <linux/dma-buf.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_gmc.h" #include "amdgpu_xgmi.h" #include "amdgpu_dma_buf.h" +#include "amdgpu_res_cursor.h" +#include "kfd_svm.h" /** * DOC: GPUVM @@ -328,7 +332,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, base->next = bo->vm_bo; bo->vm_bo = base; - if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv) + if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv) return; vm->bulk_moveable = false; @@ -338,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, amdgpu_vm_bo_idle(base); if (bo->preferred_domains & - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) + amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)) return; /* @@ -357,14 +361,14 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * Helper to get the parent entry for the child page table. NULL if we are at * the root page directory. 
*/ -static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) +static struct amdgpu_vm_bo_base *amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt) { - struct amdgpu_bo *parent = pt->base.bo->parent; + struct amdgpu_bo *parent = pt->bo->parent; if (!parent) return NULL; - return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); + return parent->vm_bo; } /* @@ -372,8 +376,8 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) */ struct amdgpu_vm_pt_cursor { uint64_t pfn; - struct amdgpu_vm_pt *parent; - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *parent; + struct amdgpu_vm_bo_base *entry; unsigned level; }; @@ -412,17 +416,17 @@ static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev, { unsigned mask, shift, idx; - if (!cursor->entry->entries) + if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry || + !cursor->entry->bo) return false; - BUG_ON(!cursor->entry->base.bo); mask = amdgpu_vm_entries_mask(adev, cursor->level); shift = amdgpu_vm_level_shift(adev, cursor->level); ++cursor->level; idx = (cursor->pfn >> shift) & mask; cursor->parent = cursor->entry; - cursor->entry = &cursor->entry->entries[idx]; + cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx]; return true; } @@ -449,7 +453,7 @@ static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev, shift = amdgpu_vm_level_shift(adev, cursor->level - 1); num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1); - if (cursor->entry == &cursor->parent->entries[num_entries - 1]) + if (cursor->entry == &to_amdgpu_bo_vm(cursor->parent->bo)->entries[num_entries - 1]) return false; cursor->pfn += 1ULL << shift; @@ -535,7 +539,7 @@ static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev, * True when the search should continue, false otherwise. 
*/ static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start, - struct amdgpu_vm_pt *entry) + struct amdgpu_vm_bo_base *entry) { return entry && (!start || entry != start->entry); } @@ -586,7 +590,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct amdgpu_bo_list_entry *entry) { entry->priority = 0; - entry->tv.bo = &vm->root.base.bo->tbo; + entry->tv.bo = &vm->root.bo->tbo; /* Two for VM updates, one for TTM and one for the CS job */ entry->tv.num_shared = 4; entry->user_pages = NULL; @@ -618,7 +622,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) + if (abo->tbo.base.resv == vm->root.bo->tbo.base.resv) vm->bulk_moveable = false; } @@ -649,15 +653,16 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, spin_lock(&adev->mman.bdev.lru_lock); list_for_each_entry(bo_base, &vm->idle, vm_status) { struct amdgpu_bo *bo = bo_base->bo; + struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo); if (!bo->parent) continue; - ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem, + ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource, &vm->lru_bulk_move); - if (bo->shadow) - ttm_bo_move_to_lru_tail(&bo->shadow->tbo, - &bo->shadow->tbo.mem, + if (shadow) + ttm_bo_move_to_lru_tail(&shadow->tbo, + shadow->tbo.resource, &vm->lru_bulk_move); } spin_unlock(&adev->mman.bdev.lru_lock); @@ -689,15 +694,21 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { struct amdgpu_bo *bo = bo_base->bo; + struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo); r = validate(param, bo); if (r) return r; + if (shadow) { + r = validate(param, shadow); + if (r) + return r; + } if (bo->tbo.type != ttm_bo_type_kernel) { amdgpu_vm_bo_moved(bo_base); } else { - vm->update_funcs->map_table(bo); + vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); amdgpu_vm_bo_relocated(bo_base); } } @@ -729,7 +740,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) * * @adev: amdgpu_device pointer * @vm: VM to clear BO from - * @bo: BO to clear + * @vmbo: BO to clear * @immediate: use an immediate update * * Root PD needs to be reserved when calling this. 
@@ -739,13 +750,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo, + struct amdgpu_bo_vm *vmbo, bool immediate) { struct ttm_operation_ctx ctx = { true, false }; unsigned level = adev->vm_manager.root_level; struct amdgpu_vm_update_params params; - struct amdgpu_bo *ancestor = bo; + struct amdgpu_bo *ancestor = &vmbo->bo; + struct amdgpu_bo *bo = &vmbo->bo; unsigned entries, ats_entries; uint64_t addr; int r; @@ -769,11 +781,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, entries -= ats_entries; } else { - struct amdgpu_vm_pt *pt; + struct amdgpu_vm_bo_base *pt; - pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base); + pt = ancestor->vm_bo; ats_entries = amdgpu_vm_num_ats_entries(adev); - if ((pt - vm->root.entries) >= ats_entries) { + if ((pt - to_amdgpu_bo_vm(vm->root.bo)->entries) >= ats_entries) { ats_entries = 0; } else { ats_entries = entries; @@ -785,14 +797,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, if (r) return r; - if (bo->shadow) { - r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement, - &ctx); + if (vmbo->shadow) { + struct amdgpu_bo *shadow = vmbo->shadow; + + r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx); if (r) return r; } - r = vm->update_funcs->map_table(bo); + r = vm->update_funcs->map_table(vmbo); if (r) return r; @@ -816,7 +829,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, amdgpu_gmc_get_vm_pde(adev, level, &value, &flags); } - r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries, + r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries, value, flags); if (r) return r; @@ -839,7 +852,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, } } - r = vm->update_funcs->update(&params, bo, addr, 0, entries, + r = vm->update_funcs->update(&params, vmbo, addr, 0, entries, value, flags); if (r) return r; @@ -849,35 +862,85 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, } /** - * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation + * amdgpu_vm_pt_create - create bo for PD/PT * * @adev: amdgpu_device pointer * @vm: requesting vm * @level: the page table level * @immediate: use a immediate update - * @bp: resulting BO allocation parameters + * @vmbo: pointer to the buffer object pointer */ -static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, +static int amdgpu_vm_pt_create(struct amdgpu_device *adev, + struct amdgpu_vm *vm, int level, bool immediate, - struct amdgpu_bo_param *bp) + struct amdgpu_bo_vm **vmbo) { - memset(bp, 0, sizeof(*bp)); + struct amdgpu_bo_param bp; + struct amdgpu_bo *bo; + struct dma_resv *resv; + unsigned int num_entries; + int r; - bp->size = amdgpu_vm_bo_size(adev, level); - bp->byte_align = AMDGPU_GPU_PAGE_SIZE; - bp->domain = AMDGPU_GEM_DOMAIN_VRAM; - bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain); - bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | + memset(&bp, 0, sizeof(bp)); + + bp.size = amdgpu_vm_bo_size(adev, level); + bp.byte_align = AMDGPU_GPU_PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain); + bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_CPU_GTT_USWC; - bp->bo_ptr_size = sizeof(struct amdgpu_bo); + + if (level < AMDGPU_VM_PTB) + num_entries = amdgpu_vm_num_entries(adev, level); + else + num_entries = 0; + + bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries); + if
(vm->use_cpu_for_update) - bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - else if (!vm->root.base.bo || vm->root.base.bo->shadow) - bp->flags |= AMDGPU_GEM_CREATE_SHADOW; - bp->type = ttm_bo_type_kernel; - bp->no_wait_gpu = immediate; - if (vm->root.base.bo) - bp->resv = vm->root.base.bo->tbo.base.resv; + bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + + bp.type = ttm_bo_type_kernel; + bp.no_wait_gpu = immediate; + if (vm->root.bo) + bp.resv = vm->root.bo->tbo.base.resv; + + r = amdgpu_bo_create_vm(adev, &bp, vmbo); + if (r) + return r; + + bo = &(*vmbo)->bo; + if (vm->is_compute_context || (adev->flags & AMD_IS_APU)) { + (*vmbo)->shadow = NULL; + return 0; + } + + if (!bp.resv) + WARN_ON(dma_resv_lock(bo->tbo.base.resv, + NULL)); + resv = bp.resv; + memset(&bp, 0, sizeof(bp)); + bp.size = amdgpu_vm_bo_size(adev, level); + bp.domain = AMDGPU_GEM_DOMAIN_GTT; + bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; + bp.type = ttm_bo_type_kernel; + bp.resv = bo->tbo.base.resv; + bp.bo_ptr_size = sizeof(struct amdgpu_bo); + + r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); + + if (!resv) + dma_resv_unlock(bo->tbo.base.resv); + + if (r) { + amdgpu_bo_unref(&bo); + return r; + } + + (*vmbo)->shadow->parent = amdgpu_bo_ref(bo); + amdgpu_bo_add_to_shadow_list(*vmbo); + + return 0; } /** @@ -899,37 +962,24 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm_pt_cursor *cursor, bool immediate) { - struct amdgpu_vm_pt *entry = cursor->entry; - struct amdgpu_bo_param bp; - struct amdgpu_bo *pt; + struct amdgpu_vm_bo_base *entry = cursor->entry; + struct amdgpu_bo *pt_bo; + struct amdgpu_bo_vm *pt; int r; - if (cursor->level < AMDGPU_VM_PTB && !entry->entries) { - unsigned num_entries; - - num_entries = amdgpu_vm_num_entries(adev, cursor->level); - entry->entries = kvmalloc_array(num_entries, - sizeof(*entry->entries), - GFP_KERNEL | __GFP_ZERO); - if (!entry->entries) - return -ENOMEM; - } - - if (entry->base.bo) + if (entry->bo) return 0; - amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp); - - r = amdgpu_bo_create(adev, &bp, &pt); + r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); if (r) return r; /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. 
*/ - pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); - amdgpu_vm_bo_base_init(&entry->base, vm, pt); - + pt_bo = &pt->bo; + pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo); + amdgpu_vm_bo_base_init(entry, vm, pt_bo); r = amdgpu_vm_clear_bo(adev, vm, pt, immediate); if (r) goto error_free_pt; @@ -938,7 +988,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, error_free_pt: amdgpu_bo_unref(&pt->shadow); - amdgpu_bo_unref(&pt); + amdgpu_bo_unref(&pt_bo); return r; } @@ -947,16 +997,17 @@ error_free_pt: * * @entry: PDE to free */ -static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry) +static void amdgpu_vm_free_table(struct amdgpu_vm_bo_base *entry) { - if (entry->base.bo) { - entry->base.bo->vm_bo = NULL; - list_del(&entry->base.vm_status); - amdgpu_bo_unref(&entry->base.bo->shadow); - amdgpu_bo_unref(&entry->base.bo); - } - kvfree(entry->entries); - entry->entries = NULL; + struct amdgpu_bo *shadow; + + if (!entry->bo) + return; + shadow = amdgpu_bo_shadowed(entry->bo); + entry->bo->vm_bo = NULL; + list_del(&entry->vm_status); + amdgpu_bo_unref(&shadow); + amdgpu_bo_unref(&entry->bo); } /** @@ -973,7 +1024,7 @@ static void amdgpu_vm_free_pts(struct amdgpu_device *adev, struct amdgpu_vm_pt_cursor *start) { struct amdgpu_vm_pt_cursor cursor; - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *entry; vm->bulk_moveable = false; @@ -1241,10 +1292,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) */ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, struct amdgpu_vm *vm, - struct amdgpu_vm_pt *entry) + struct amdgpu_vm_bo_base *entry) { - struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry); - struct amdgpu_bo *bo = parent->base.bo, *pbo; + struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry); + struct amdgpu_bo *bo = parent->bo, *pbo; uint64_t pde, pt, flags; unsigned level; @@ -1252,9 +1303,10 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, pbo = pbo->parent; level += params->adev->vm_manager.root_level; - amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags); - pde = (entry - parent->entries) * 8; - return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); + amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags); + pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8; + return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt, + 1, 0, flags); } /** @@ -1269,11 +1321,11 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_vm_pt_cursor cursor; - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *entry; for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) - if (entry->base.bo && !entry->base.moved) - amdgpu_vm_bo_relocated(&entry->base); + if (entry->bo && !entry->moved) + amdgpu_vm_bo_relocated(entry); } /** @@ -1307,11 +1359,12 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, return r; while (!list_empty(&vm->relocated)) { - struct amdgpu_vm_pt *entry; + struct amdgpu_vm_bo_base *entry; - entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt, - base.vm_status); - amdgpu_vm_bo_idle(&entry->base); + entry = list_first_entry(&vm->relocated, + struct amdgpu_vm_bo_base, + vm_status); + amdgpu_vm_bo_idle(entry); r = amdgpu_vm_update_pde(&params, vm, entry); if (r) @@ -1334,9 +1387,9 @@ error: * Make sure to set the right flags for the PTEs at the desired level.
*/ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params, - struct amdgpu_bo *bo, unsigned level, + struct amdgpu_bo_vm *pt, unsigned int level, uint64_t pe, uint64_t addr, - unsigned count, uint32_t incr, + unsigned int count, uint32_t incr, uint64_t flags) { @@ -1352,7 +1405,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params, flags |= AMDGPU_PTE_EXECUTABLE; } - params->vm->update_funcs->update(params, bo, pe, addr, count, incr, + params->vm->update_funcs->update(params, pt, pe, addr, count, incr, flags); } @@ -1491,7 +1544,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, continue; } - pt = cursor.entry->base.bo; + pt = cursor.entry->bo; if (!pt) { /* We need all PDs and PTs for mapping something, */ if (flags & AMDGPU_PTE_VALID) @@ -1503,7 +1556,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, if (!amdgpu_vm_pt_ancestor(&cursor)) return -EINVAL; - pt = cursor.entry->base.bo; + pt = cursor.entry->bo; shift = parent_shift; frag_end = max(frag_end, ALIGN(frag_start + 1, 1ULL << shift)); @@ -1532,9 +1585,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, nptes, dst, incr, upd_flags, vm->task_info.pid, vm->immediate.fence_context); - amdgpu_vm_update_flags(params, pt, cursor.level, - pe_start, dst, nptes, incr, - upd_flags); + amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt), + cursor.level, pe_start, dst, + nptes, incr, upd_flags); pe_start += nptes * 8; dst += nptes * incr; @@ -1557,7 +1610,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * completely covered by the range and so potentially still in use. */ while (cursor.pfn < frag_start) { - amdgpu_vm_free_pts(adev, params->vm, &cursor); + /* Make sure previous mapping is freed */ + if (cursor.entry->bo) { + params->table_freed = true; + amdgpu_vm_free_pts(adev, params->vm, &cursor); + } amdgpu_vm_pt_next(adev, &cursor); } @@ -1583,29 +1640,34 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * @last: last mapped entry * @flags: flags for the entries * @offset: offset into nodes and pages_addr - * @nodes: array of drm_mm_nodes with the MC addresses + * @res: ttm_resource to map * @pages_addr: DMA addresses to use for mapping * @fence: optional resulting fence + * @table_freed: return true if page table is freed * * Fill in the page table entries between @start and @last. * * Returns: * 0 for success, -EINVAL for failure. 
*/ -static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, - struct amdgpu_device *bo_adev, - struct amdgpu_vm *vm, bool immediate, - bool unlocked, struct dma_resv *resv, - uint64_t start, uint64_t last, - uint64_t flags, uint64_t offset, - struct drm_mm_node *nodes, - dma_addr_t *pages_addr, - struct dma_fence **fence) +int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_device *bo_adev, + struct amdgpu_vm *vm, bool immediate, + bool unlocked, struct dma_resv *resv, + uint64_t start, uint64_t last, + uint64_t flags, uint64_t offset, + struct ttm_resource *res, + dma_addr_t *pages_addr, + struct dma_fence **fence, + bool *table_freed) { struct amdgpu_vm_update_params params; + struct amdgpu_res_cursor cursor; enum amdgpu_sync_mode sync_mode; - uint64_t pfn; - int r; + int r, idx; + + if (!drm_dev_enter(&adev->ddev, &idx)) + return -ENODEV; memset(&params, 0, sizeof(params)); params.adev = adev; @@ -1622,14 +1684,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, else sync_mode = AMDGPU_SYNC_EXPLICIT; - pfn = offset >> PAGE_SHIFT; - if (nodes) { - while (pfn >= nodes->size) { - pfn -= nodes->size; - ++nodes; - } - } - amdgpu_vm_eviction_lock(vm); if (vm->evicting) { r = -EBUSY; @@ -1639,7 +1693,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { struct dma_fence *tmp = dma_fence_get_stub(); - amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true); + amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); swap(vm->last_unlocked, tmp); dma_fence_put(tmp); } @@ -1648,23 +1702,17 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, r = vm->update_funcs->prepare(&params, resv, sync_mode); if (r) goto error_unlock; - do { + amdgpu_res_first(pages_addr ? NULL : res, offset, + (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor); + while (cursor.remaining) { uint64_t tmp, num_entries, addr; - - num_entries = last - start + 1; - if (nodes) { - addr = nodes->start << PAGE_SHIFT; - num_entries = min((nodes->size - pfn) * - AMDGPU_GPU_PAGES_IN_CPU_PAGE, num_entries); - } else { - addr = 0; - } - + num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT; if (pages_addr) { bool contiguous = true; if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) { + uint64_t pfn = cursor.start >> PAGE_SHIFT; uint64_t count; contiguous = pages_addr[pfn + 1] == @@ -1684,16 +1732,18 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, } if (!contiguous) { - addr = pfn << PAGE_SHIFT; + addr = cursor.start; params.pages_addr = pages_addr; } else { - addr = pages_addr[pfn]; + addr = pages_addr[cursor.start >> PAGE_SHIFT]; params.pages_addr = NULL; } } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) { - addr += bo_adev->vm_manager.vram_base_offset; - addr += pfn << PAGE_SHIFT; + addr = bo_adev->vm_manager.vram_base_offset + + cursor.start; + } else { + addr = 0; } tmp = start + num_entries; r = amdgpu_vm_update_ptes(&params, start, tmp, addr, flags); if (r) goto error_unlock; - pfn += num_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE; - if (nodes && nodes->size == pfn) { - pfn = 0; - ++nodes; - } + amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE); start = tmp; - - } while (unlikely(start != last + 1)); + } r = vm->update_funcs->commit(&params, fence); + if (table_freed) + *table_freed = *table_freed || params.table_freed; + error_unlock: amdgpu_vm_eviction_unlock(vm); + drm_dev_exit(idx); return r; } + +void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, + uint64_t
*gtt_mem, uint64_t *cpu_mem) +{ + struct amdgpu_bo_va *bo_va, *tmp; + + list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + spin_lock(&vm->invalidated_lock); + list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { + if (!bo_va->base.bo) + continue; + amdgpu_bo_get_memory(bo_va->base.bo, vram_mem, + gtt_mem, cpu_mem); + } + spin_unlock(&vm->invalidated_lock); +} /** * amdgpu_vm_bo_update - update all BO mappings in the vm page table * * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object * @clear: if true clear the entries + * @table_freed: return true if page table is freed * * Fill in the page table entries for @bo_va. * @@ -1730,14 +1824,13 @@ error_unlock: * 0 for success, -EINVAL for failure. */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear) + bool clear, bool *table_freed) { struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; struct ttm_resource *mem; - struct drm_mm_node *nodes; struct dma_fence **last_update; struct dma_resv *resv; uint64_t flags; @@ -1746,8 +1839,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, if (clear || !bo) { mem = NULL; - nodes = NULL; - resv = vm->root.base.bo->tbo.base.resv; + resv = vm->root.bo->tbo.base.resv; } else { struct drm_gem_object *obj = &bo->tbo.base; @@ -1757,12 +1849,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct drm_gem_object *gobj = dma_buf->priv; struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - if (abo->tbo.mem.mem_type == TTM_PL_VRAM) + if (abo->tbo.resource->mem_type == TTM_PL_VRAM) bo = gem_to_amdgpu_bo(gobj); } - mem = &bo->tbo.mem; - nodes = mem->mm_node; - if (mem->mem_type == TTM_PL_TT) + mem = bo->tbo.resource; + if (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_PREEMPT) pages_addr = bo->tbo.ttm->dma_address; } @@ -1778,7 +1870,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, } if (clear || (bo && bo->tbo.base.resv == - vm->root.base.bo->tbo.base.resv)) + vm->root.bo->tbo.base.resv)) last_update = &vm->last_update; else last_update = &bo_va->last_pt_update; @@ -1810,8 +1902,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, r = amdgpu_vm_bo_update_mapping(adev, bo_adev, vm, false, false, resv, mapping->start, mapping->last, update_flags, - mapping->offset, nodes, - pages_addr, last_update); + mapping->offset, mem, + pages_addr, last_update, table_freed); if (r) return r; } @@ -1820,8 +1912,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va 
*bo_va, * the evicted list so that it gets validated again on the * next command submission. */ - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { - uint32_t mem_type = bo->tbo.mem.mem_type; + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { + uint32_t mem_type = bo->tbo.resource->mem_type; if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) @@ -1957,18 +2049,17 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, */ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; + struct dma_resv *resv = vm->root.bo->tbo.base.resv; struct dma_fence *excl, **shared; unsigned i, shared_count; int r; - r = dma_resv_get_fences_rcu(resv, &excl, - &shared_count, &shared); + r = dma_resv_get_fences(resv, &excl, &shared_count, &shared); if (r) { /* Not enough memory to grab the fence list, as last resort * block for all the fences to complete. */ - dma_resv_wait_timeout_rcu(resv, true, false, + dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT); return; } @@ -2004,7 +2095,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence) { - struct dma_resv *resv = vm->root.base.bo->tbo.base.resv; + struct dma_resv *resv = vm->root.bo->tbo.base.resv; struct amdgpu_bo_va_mapping *mapping; uint64_t init_pte_value = 0; struct dma_fence *f = NULL; @@ -2022,7 +2113,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, resv, mapping->start, mapping->last, init_pte_value, - 0, NULL, NULL, &f); + 0, NULL, NULL, &f, NULL); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -2064,7 +2155,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to bo cleared in the page tables */ - r = amdgpu_vm_bo_update(adev, bo_va, false); + r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); if (r) return r; } @@ -2083,7 +2174,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, else clear = true; - r = amdgpu_vm_bo_update(adev, bo_va, clear); + r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); if (r) return r; @@ -2163,7 +2254,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, if (mapping->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); - if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv && + if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv && !bo_va->base.moved) { list_move(&bo_va->base.vm_status, &vm->moved); } @@ -2525,7 +2616,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_vm_bo_base **base; if (bo) { - if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) + if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) vm->bulk_moveable = false; for (base = &bo_va->base.bo->vm_bo; *base; @@ -2580,7 +2671,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo) return true; /* Don't evict VM page tables while they are busy */ - if (!dma_resv_test_signaled_rcu(bo->tbo.base.resv, true)) + if (!dma_resv_test_signaled(bo->tbo.base.resv, true)) return false; /* Try to block ongoing updates */ @@ -2613,13 +2704,13 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_vm_bo_base *bo_base; /* shadow bo doesn't have bo base, its validation needs its parent */ - if (bo->parent && bo->parent->shadow == bo) + if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) bo = bo->parent; 
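	/*
	 * amdgpu_bo_shadowed() and to_amdgpu_bo_vm() belong to the
	 * amdgpu_bo_vm wrapper used for page-table BOs throughout this
	 * change: the wrapper embeds the plain amdgpu_bo together with its
	 * optional shadow copy, which is why vm->root is now a bare
	 * amdgpu_vm_bo_base and callers reach the shadow through the
	 * container rather than bo->shadow. A minimal sketch of the idiom,
	 * assuming only kernel (page-table) BOs carry the wrapper and using
	 * a hypothetical validate() helper for illustration:
	 *
	 *	struct amdgpu_bo_vm *vmbo = to_amdgpu_bo_vm(vm->root.bo);
	 *	struct amdgpu_bo *shadow = amdgpu_bo_shadowed(vm->root.bo);
	 *
	 *	if (shadow)              // the shadow copy is optional
	 *		validate(shadow);    // hypothetical helper
	 *	validate(&vmbo->bo);         // the page-table BO itself
	 */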
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { struct amdgpu_vm *vm = bo_base->vm; - if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { + if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { amdgpu_vm_bo_evicted(bo_base); continue; } @@ -2630,7 +2721,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, if (bo->tbo.type == ttm_bo_type_kernel) amdgpu_vm_bo_relocated(bo_base); - else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) + else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) amdgpu_vm_bo_moved(bo_base); else amdgpu_vm_bo_invalidated(bo_base); @@ -2760,8 +2851,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv, - true, true, timeout); + timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true, + true, timeout); if (timeout <= 0) return timeout; @@ -2773,7 +2864,6 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) * * @adev: amdgpu_device pointer * @vm: requested vm - * @vm_context: Indicates if it GFX or Compute context * @pasid: Process address space identifier * * Init @vm fields. @@ -2781,11 +2871,10 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) * Returns: * 0 for success, error for failure. */ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context, u32 pasid) +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid) { - struct amdgpu_bo_param bp; - struct amdgpu_bo *root; + struct amdgpu_bo *root_bo; + struct amdgpu_bo_vm *root; int r, i; vm->va = RB_ROOT_CACHED; @@ -2816,16 +2905,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->pte_support_ats = false; vm->is_compute_context = false; - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { - vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & - AMDGPU_VM_USE_CPU_FOR_COMPUTE); + vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & + AMDGPU_VM_USE_CPU_FOR_GFX); - if (adev->asic_type == CHIP_RAVEN) - vm->pte_support_ats = true; - } else { - vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & - AMDGPU_VM_USE_CPU_FOR_GFX); - } DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? 
"CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && @@ -2842,28 +2924,26 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, mutex_init(&vm->eviction_lock); vm->evicting = false; - amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); - if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) - bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; - r = amdgpu_bo_create(adev, &bp, &root); + r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, + false, &root); if (r) goto error_free_delayed; - - r = amdgpu_bo_reserve(root, true); + root_bo = &root->bo; + r = amdgpu_bo_reserve(root_bo, true); if (r) goto error_free_root; - r = dma_resv_reserve_shared(root->tbo.base.resv, 1); + r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1); if (r) goto error_unreserve; - amdgpu_vm_bo_base_init(&vm->root.base, vm, root); + amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); r = amdgpu_vm_clear_bo(adev, vm, root, false); if (r) goto error_unreserve; - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); if (pasid) { unsigned long flags; @@ -2883,12 +2963,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, return 0; error_unreserve: - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); error_free_root: - amdgpu_bo_unref(&vm->root.base.bo->shadow); - amdgpu_bo_unref(&vm->root.base.bo); - vm->root.base.bo = NULL; + amdgpu_bo_unref(&root->shadow); + amdgpu_bo_unref(&root_bo); + vm->root.bo = NULL; error_free_delayed: dma_fence_put(vm->last_unlocked); @@ -2914,17 +2994,14 @@ error_free_immediate: * 0 if this VM is clean */ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, - struct amdgpu_vm *vm) + struct amdgpu_vm *vm) { enum amdgpu_vm_level root = adev->vm_manager.root_level; unsigned int entries = amdgpu_vm_num_entries(adev, root); unsigned int i = 0; - if (!(vm->root.entries)) - return 0; - for (i = 0; i < entries; i++) { - if (vm->root.entries[i].base.bo) + if (to_amdgpu_bo_vm(vm->root.bo)->entries[i].bo) return -EINVAL; } @@ -2958,7 +3035,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; - r = amdgpu_bo_reserve(vm->root.base.bo, true); + r = amdgpu_bo_reserve(vm->root.bo, true); if (r) return r; @@ -2985,7 +3062,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ if (pte_support_ats != vm->pte_support_ats) { vm->pte_support_ats = pte_support_ats; - r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false); + r = amdgpu_vm_clear_bo(adev, vm, + to_amdgpu_bo_vm(vm->root.bo), + false); if (r) goto free_idr; } @@ -3001,7 +3080,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, if (vm->use_cpu_for_update) { /* Sync with last SDMA update/clear before switching to CPU */ - r = amdgpu_bo_sync_wait(vm->root.base.bo, + r = amdgpu_bo_sync_wait(vm->root.bo, AMDGPU_FENCE_OWNER_UNDEFINED, true); if (r) goto free_idr; @@ -3029,7 +3108,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, } /* Free the shadow bo for compute VM */ - amdgpu_bo_unref(&vm->root.base.bo->shadow); + amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow); if (pasid) vm->pasid = pasid; @@ -3045,7 +3124,7 @@ free_idr: spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); } unreserve_bo: - amdgpu_bo_unreserve(vm->root.base.bo); + amdgpu_bo_unreserve(vm->root.bo); return r; } @@ -3088,7 +3167,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct 
amdgpu_vm *vm) amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); - root = amdgpu_bo_ref(vm->root.base.bo); + root = amdgpu_bo_ref(vm->root.bo); amdgpu_bo_reserve(root, true); if (vm->pasid) { unsigned long flags; @@ -3115,7 +3194,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_vm_free_pts(adev, vm, NULL); amdgpu_bo_unreserve(root); amdgpu_bo_unref(&root); - WARN_ON(vm->root.base.bo); + WARN_ON(vm->root.bo); drm_sched_entity_destroy(&vm->immediate); drm_sched_entity_destroy(&vm->delayed); @@ -3232,7 +3311,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) /* Wait vm idle to make sure the vmid set in SPM_VMID is * not referenced anymore. */ - r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true); + r = amdgpu_bo_reserve(fpriv->vm.root.bo, true); if (r) return r; @@ -3240,7 +3319,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (r < 0) return r; - amdgpu_bo_unreserve(fpriv->vm.root.base.bo); + amdgpu_bo_unreserve(fpriv->vm.root.bo); amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); break; default: @@ -3304,47 +3383,57 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, uint64_t addr) { + bool is_compute_context = false; struct amdgpu_bo *root; + unsigned long irqflags; uint64_t value, flags; struct amdgpu_vm *vm; int r; - spin_lock(&adev->vm_manager.pasid_lock); + spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); - if (vm) - root = amdgpu_bo_ref(vm->root.base.bo); - else + if (vm) { + root = amdgpu_bo_ref(vm->root.bo); + is_compute_context = vm->is_compute_context; + } else { root = NULL; - spin_unlock(&adev->vm_manager.pasid_lock); + } + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags); if (!root) return false; + addr /= AMDGPU_GPU_PAGE_SIZE; + + if (is_compute_context && + !svm_range_restore_pages(adev, pasid, addr)) { + amdgpu_bo_unref(&root); + return true; + } + r = amdgpu_bo_reserve(root, true); if (r) goto error_unref; /* Double check that the VM still exists */ - spin_lock(&adev->vm_manager.pasid_lock); + spin_lock_irqsave(&adev->vm_manager.pasid_lock, irqflags); vm = idr_find(&adev->vm_manager.pasid_idr, pasid); - if (vm && vm->root.base.bo != root) + if (vm && vm->root.bo != root) vm = NULL; - spin_unlock(&adev->vm_manager.pasid_lock); + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, irqflags); if (!vm) goto error_unlock; - addr /= AMDGPU_GPU_PAGE_SIZE; flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | AMDGPU_PTE_SYSTEM; - if (vm->is_compute_context) { + if (is_compute_context) { /* Intentionally setting invalid PTE flag * combination to force a no-retry-fault */ flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | AMDGPU_PTE_TF; value = 0; - } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { /* Redirect the access to the dummy page */ value = adev->dummy_page_addr; @@ -3363,7 +3452,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, } r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr, - addr, flags, value, NULL, NULL, + addr, flags, value, NULL, NULL, NULL, NULL); if (r) goto error_unlock; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4e140288159c..ddb85a85cbba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -39,6 +39,7 @@ struct amdgpu_bo_va; struct amdgpu_job; struct 
amdgpu_bo_list_entry; +struct amdgpu_bo_vm; /* * GPUVM handling @@ -121,9 +122,6 @@ struct amdgpu_bo_list_entry; /* max vmids dedicated for process */ #define AMDGPU_VM_MAX_RESERVED_VMID 1 -#define AMDGPU_VM_CONTEXT_GFX 0 -#define AMDGPU_VM_CONTEXT_COMPUTE 1 - /* See vm_update_mode */ #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) @@ -154,13 +152,6 @@ struct amdgpu_vm_bo_base { bool moved; }; -struct amdgpu_vm_pt { - struct amdgpu_vm_bo_base base; - - /* array of page tables, one for each directory entry */ - struct amdgpu_vm_pt *entries; -}; - /* provided by hw blocks that can write ptes, e.g., sdma */ struct amdgpu_vm_pte_funcs { /* number of dw to reserve per operation */ @@ -234,14 +225,19 @@ struct amdgpu_vm_update_params { * @num_dw_left: number of dw left for the IB */ unsigned int num_dw_left; + + /** + * @table_freed: return true if page table is freed when updating + */ + bool table_freed; }; struct amdgpu_vm_update_funcs { - int (*map_table)(struct amdgpu_bo *bo); + int (*map_table)(struct amdgpu_bo_vm *bo); int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv, enum amdgpu_sync_mode sync_mode); int (*update)(struct amdgpu_vm_update_params *p, - struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, + struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); int (*commit)(struct amdgpu_vm_update_params *p, struct dma_fence **fence); @@ -281,7 +277,7 @@ struct amdgpu_vm { struct list_head done; /* contains the page directory */ - struct amdgpu_vm_pt root; + struct amdgpu_vm_bo_base root; struct dma_fence *last_update; /* Scheduler entities for page table updates */ @@ -367,6 +363,8 @@ struct amdgpu_vm_manager { spinlock_t pasid_lock; }; +struct amdgpu_bo_va_mapping; + #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) @@ -378,8 +376,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_fini(struct amdgpu_device *adev); long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout); -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int vm_context, u32 pasid); +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid); int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid); void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); @@ -398,9 +395,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct dma_fence **fence); int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_device *bo_adev, + struct amdgpu_vm *vm, bool immediate, + bool unlocked, struct dma_resv *resv, + uint64_t start, uint64_t last, + uint64_t flags, uint64_t offset, + struct ttm_resource *res, + dma_addr_t *pages_addr, + struct dma_fence **fence, bool *free_table); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear); + bool clear, bool *table_freed); bool amdgpu_vm_evictable(struct amdgpu_bo 
*bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); @@ -447,6 +453,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo); +void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, + uint64_t *gtt_mem, uint64_t *cpu_mem); #if defined(CONFIG_DEBUG_FS) void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index ac45d9c7a4e9..e3fbf0f10add 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -29,9 +29,9 @@ * * @table: newly allocated or validated PD/PT */ -static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table) +static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table) { - return amdgpu_bo_kmap(table, NULL); + return amdgpu_bo_kmap(&table->bo, NULL); } /** @@ -58,7 +58,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, * amdgpu_vm_cpu_update - helper to update page tables via CPU * * @p: see amdgpu_vm_update_params definition - * @bo: PD/PT to update + * @vmbo: PD/PT to update * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB * @addr: dst addr to write into pe * @count: number of page entries to update @@ -68,7 +68,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, * Write count number of PT/PD entries directly. */ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, - struct amdgpu_bo *bo, uint64_t pe, + struct amdgpu_bo_vm *vmbo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { @@ -76,13 +76,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, uint64_t value; int r; - if (bo->tbo.moving) { - r = dma_fence_wait(bo->tbo.moving, true); + if (vmbo->bo.tbo.moving) { + r = dma_fence_wait(vmbo->bo.tbo.moving, true); if (r) return r; } - pe += (unsigned long)amdgpu_bo_kptr(bo); + pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo); trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate); @@ -110,7 +110,7 @@ static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p, { /* Flush HDP */ mb(); - amdgpu_asic_flush_hdp(p->adev, NULL); + amdgpu_device_flush_hdp(p->adev, NULL); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index a83a646759c5..dbb551762805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -33,11 +33,11 @@ * * @table: newly allocated or validated PD/PT */ -static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table) +static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) { int r; - r = amdgpu_ttm_alloc_gart(&table->tbo); + r = amdgpu_ttm_alloc_gart(&table->bo.tbo); if (r) return r; @@ -112,7 +112,7 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, swap(p->vm->last_unlocked, f); dma_fence_put(tmp); } else { - amdgpu_bo_fence(p->vm->root.base.bo, f, true); + amdgpu_bo_fence(p->vm->root.bo, f, true); } if (fence && !p->immediate) @@ -186,7 +186,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, * amdgpu_vm_sdma_update - execute VM update * * @p: see amdgpu_vm_update_params definition - * @bo: PD/PT to update + * @vmbo: PD/PT to update * @pe: byte offset of the PDE/PTE, relative to start of 
PDB/PTB * @addr: dst addr to write into pe * @count: number of page entries to update @@ -197,10 +197,11 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, * the IB. */ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, - struct amdgpu_bo *bo, uint64_t pe, + struct amdgpu_bo_vm *vmbo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { + struct amdgpu_bo *bo = &vmbo->bo; enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE : AMDGPU_IB_POOL_DELAYED; unsigned int i, ndw, nptes; @@ -238,8 +239,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (!p->pages_addr) { /* set page commands needed */ - if (bo->shadow) - amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr, + if (vmbo->shadow) + amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr, count, incr, flags); amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count, incr, flags); @@ -248,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, /* copy commands needed */ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw * - (bo->shadow ? 2 : 1); + (vmbo->shadow ? 2 : 1); /* for padding */ ndw -= 7; @@ -263,8 +264,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, pte[i] |= flags; } - if (bo->shadow) - amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes); + if (vmbo->shadow) + amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes); amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes); pe += nptes * 8; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index bce105e2973e..436ec246a7da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -23,18 +23,27 @@ */ #include <linux/dma-mapping.h> +#include <drm/ttm/ttm_range_manager.h> + #include "amdgpu.h" #include "amdgpu_vm.h" #include "amdgpu_res_cursor.h" #include "amdgpu_atomfirmware.h" #include "atom.h" -static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man) +struct amdgpu_vram_reservation { + struct list_head node; + struct drm_mm_node mm_node; +}; + +static inline struct amdgpu_vram_mgr * +to_vram_mgr(struct ttm_resource_manager *man) { return container_of(man, struct amdgpu_vram_mgr, manager); } -static inline struct amdgpu_device *to_amdgpu_device(struct amdgpu_vram_mgr *mgr) +static inline struct amdgpu_device * +to_amdgpu_device(struct amdgpu_vram_mgr *mgr) { return container_of(mgr, struct amdgpu_device, mman.vram_mgr); } @@ -82,12 +91,14 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev, * amount of currently used VRAM in bytes */ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man; + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man)); } @@ -100,18 +111,28 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev, * amount of currently used visible VRAM in bytes */ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = 
drm_to_adev(ddev); - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man; + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man)); } +/** + * DOC: mem_info_vram_vendor + * + * The amdgpu driver provides a sysfs API for reporting the vendor of the + * installed VRAM + * The file mem_info_vram_vendor is used for this and returns the name of the + * vendor. + */ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); @@ -153,7 +174,7 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO, static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO, amdgpu_mem_info_vram_vendor, NULL); -static const struct attribute *amdgpu_vram_mgr_attributes[] = { +static struct attribute *amdgpu_vram_mgr_attributes[] = { &dev_attr_mem_info_vram_total.attr, &dev_attr_mem_info_vis_vram_total.attr, &dev_attr_mem_info_vram_used.attr, @@ -162,77 +183,9 @@ static const struct attribute *amdgpu_vram_mgr_attributes[] = { NULL }; -static const struct ttm_resource_manager_func amdgpu_vram_mgr_func; - -/** - * amdgpu_vram_mgr_init - init VRAM manager and DRM MM - * - * @adev: amdgpu_device pointer - * - * Allocate and initialize the VRAM manager. - */ -int amdgpu_vram_mgr_init(struct amdgpu_device *adev) -{ - struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; - struct ttm_resource_manager *man = &mgr->manager; - int ret; - - ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); - - man->func = &amdgpu_vram_mgr_func; - - drm_mm_init(&mgr->mm, 0, man->size); - spin_lock_init(&mgr->lock); - INIT_LIST_HEAD(&mgr->reservations_pending); - INIT_LIST_HEAD(&mgr->reserved_pages); - - /* Add the two VRAM-related sysfs files */ - ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes); - if (ret) - DRM_ERROR("Failed to register sysfs\n"); - - ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); - ttm_resource_manager_set_used(man, true); - return 0; -} - -/** - * amdgpu_vram_mgr_fini - free and destroy VRAM manager - * - * @adev: amdgpu_device pointer - * - * Destroy and free the VRAM manager, returns -EBUSY if ranges are still - * allocated inside it. 
- */ -void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) -{ - struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; - struct ttm_resource_manager *man = &mgr->manager; - int ret; - struct amdgpu_vram_reservation *rsv, *temp; - - ttm_resource_manager_set_used(man, false); - - ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); - if (ret) - return; - - spin_lock(&mgr->lock); - list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) - kfree(rsv); - - list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) { - drm_mm_remove_node(&rsv->mm_node); - kfree(rsv); - } - drm_mm_takedown(&mgr->mm); - spin_unlock(&mgr->lock); - - sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes); - - ttm_resource_manager_cleanup(man); - ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); -} +const struct attribute_group amdgpu_vram_mgr_attr_group = { + .attrs = amdgpu_vram_mgr_attributes +}; /** * amdgpu_vram_mgr_vis_size - Calculate visible node size @@ -266,23 +219,25 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct ttm_resource *mem = &bo->tbo.mem; - struct drm_mm_node *nodes = mem->mm_node; - unsigned pages = mem->num_pages; + struct ttm_resource *res = bo->tbo.resource; + unsigned pages = res->num_pages; + struct drm_mm_node *mm; u64 usage; if (amdgpu_gmc_vram_full_visible(&adev->gmc)) return amdgpu_bo_size(bo); - if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) + if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) return 0; - for (usage = 0; nodes && pages; pages -= nodes->size, nodes++) - usage += amdgpu_vram_mgr_vis_size(adev, nodes); + mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0]; + for (usage = 0; pages; pages -= mm->size, mm++) + usage += amdgpu_vram_mgr_vis_size(adev, mm); return usage; } +/* Commit the reservation of VRAM pages */ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man) { struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); @@ -413,15 +368,15 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, struct ttm_buffer_object *tbo, const struct ttm_place *place, - struct ttm_resource *mem) + struct ttm_resource **res) { + unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages; struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); + uint64_t vis_usage = 0, mem_bytes, max_bytes; + struct ttm_range_mgr_node *node; struct drm_mm *mm = &mgr->mm; - struct drm_mm_node *nodes; enum drm_mm_insert_mode mode; - unsigned long lpfn, num_nodes, pages_per_node, pages_left; - uint64_t vis_usage = 0, mem_bytes, max_bytes; unsigned i; int r; @@ -434,10 +389,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, max_bytes -= AMDGPU_VM_RESERVED_VRAM; /* bail out quickly if there's likely not enough VRAM for this BO */ - mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; + mem_bytes = tbo->base.size; if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { - atomic64_sub(mem_bytes, &mgr->usage); - return -ENOSPC; + r = -ENOSPC; + goto error_sub; } if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { @@ -448,78 +403,78 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, pages_per_node = HPAGE_PMD_NR; #else /* default to 2MB */ - pages_per_node = (2UL << (20UL - PAGE_SHIFT)); + pages_per_node = 2UL << 
(20UL - PAGE_SHIFT); #endif - pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment); - num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); + pages_per_node = max_t(uint32_t, pages_per_node, + tbo->page_alignment); + num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node); } - nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes), - GFP_KERNEL | __GFP_ZERO); - if (!nodes) { - atomic64_sub(mem_bytes, &mgr->usage); - return -ENOMEM; + node = kvmalloc(struct_size(node, mm_nodes, num_nodes), + GFP_KERNEL | __GFP_ZERO); + if (!node) { + r = -ENOMEM; + goto error_sub; } + ttm_resource_init(tbo, place, &node->base); + mode = DRM_MM_INSERT_BEST; if (place->flags & TTM_PL_FLAG_TOPDOWN) mode = DRM_MM_INSERT_HIGH; - mem->start = 0; - pages_left = mem->num_pages; + pages_left = node->base.num_pages; - spin_lock(&mgr->lock); - for (i = 0; pages_left >= pages_per_node; ++i) { - unsigned long pages = rounddown_pow_of_two(pages_left); - - /* Limit maximum size to 2GB due to SG table limitations */ - pages = min(pages, (2UL << (30 - PAGE_SHIFT))); - - r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, - pages_per_node, 0, - place->fpfn, lpfn, - mode); - if (unlikely(r)) - break; - - vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); - amdgpu_vram_mgr_virt_start(mem, &nodes[i]); - pages_left -= pages; - } + /* Limit maximum size to 2GB due to SG table limitations */ + pages = min(pages_left, 2UL << (30 - PAGE_SHIFT)); - for (; pages_left; ++i) { - unsigned long pages = min(pages_left, pages_per_node); - uint32_t alignment = mem->page_alignment; + i = 0; + spin_lock(&mgr->lock); + while (pages_left) { + uint32_t alignment = tbo->page_alignment; - if (pages == pages_per_node) + if (pages >= pages_per_node) alignment = pages_per_node; - r = drm_mm_insert_node_in_range(mm, &nodes[i], - pages, alignment, 0, - place->fpfn, lpfn, - mode); - if (unlikely(r)) - goto error; + r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages, + alignment, 0, place->fpfn, + lpfn, mode); + if (unlikely(r)) { + if (pages > pages_per_node) { + if (is_power_of_2(pages)) + pages = pages / 2; + else + pages = rounddown_pow_of_two(pages); + continue; + } + goto error_free; + } - vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]); - amdgpu_vram_mgr_virt_start(mem, &nodes[i]); + vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]); + amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]); pages_left -= pages; + ++i; + + if (pages > pages_left) + pages = pages_left; } spin_unlock(&mgr->lock); - atomic64_add(vis_usage, &mgr->vis_usage); - - mem->mm_node = nodes; + if (i == 1) + node->base.placement |= TTM_PL_FLAG_CONTIGUOUS; + atomic64_add(vis_usage, &mgr->vis_usage); + *res = &node->base; return 0; -error: +error_free: while (i--) - drm_mm_remove_node(&nodes[i]); + drm_mm_remove_node(&node->mm_nodes[i]); spin_unlock(&mgr->lock); - atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage); + kvfree(node); - kvfree(nodes); +error_sub: + atomic64_sub(mem_bytes, &mgr->usage); return r; } @@ -532,24 +487,22 @@ error: * Free the allocated VRAM again. 
*/ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, - struct ttm_resource *mem) + struct ttm_resource *res) { + struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res); struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); struct amdgpu_device *adev = to_amdgpu_device(mgr); - struct drm_mm_node *nodes = mem->mm_node; uint64_t usage = 0, vis_usage = 0; - unsigned pages = mem->num_pages; - - if (!mem->mm_node) - return; + unsigned i, pages; spin_lock(&mgr->lock); - while (pages) { - pages -= nodes->size; - drm_mm_remove_node(nodes); - usage += nodes->size << PAGE_SHIFT; - vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes); - ++nodes; + for (i = 0, pages = res->num_pages; pages; + pages -= node->mm_nodes[i].size, ++i) { + struct drm_mm_node *mm = &node->mm_nodes[i]; + + drm_mm_remove_node(mm); + usage += mm->size << PAGE_SHIFT; + vis_usage += amdgpu_vram_mgr_vis_size(adev, mm); } amdgpu_vram_mgr_do_reserve(man); spin_unlock(&mgr->lock); @@ -557,8 +510,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, atomic64_sub(usage, &mgr->usage); atomic64_sub(vis_usage, &mgr->vis_usage); - kvfree(mem->mm_node); - mem->mm_node = NULL; + kvfree(node); } /** @@ -575,7 +527,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, * Allocate and fill a sg table from a VRAM allocation. */ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, - struct ttm_resource *mem, + struct ttm_resource *res, u64 offset, u64 length, struct device *dev, enum dma_data_direction dir, @@ -591,7 +543,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, return -ENOMEM; /* Determine the number of DRM_MM nodes to export */ - amdgpu_res_first(mem, offset, length, &cursor); + amdgpu_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; amdgpu_res_next(&cursor, cursor.size); @@ -611,7 +563,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, * and the number of bytes from it. Access the following * DRM_MM node(s) if more buffer needs to exported */ - amdgpu_res_first(mem, offset, length, &cursor); + amdgpu_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { phys_addr_t phys = cursor.start + adev->gmc.aper_base; size_t size = cursor.size; @@ -727,3 +679,65 @@ static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { .free = amdgpu_vram_mgr_del, .debug = amdgpu_vram_mgr_debug }; + +/** + * amdgpu_vram_mgr_init - init VRAM manager and DRM MM + * + * @adev: amdgpu_device pointer + * + * Allocate and initialize the VRAM manager. + */ +int amdgpu_vram_mgr_init(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct ttm_resource_manager *man = &mgr->manager; + + ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT); + + man->func = &amdgpu_vram_mgr_func; + + drm_mm_init(&mgr->mm, 0, man->size); + spin_lock_init(&mgr->lock); + INIT_LIST_HEAD(&mgr->reservations_pending); + INIT_LIST_HEAD(&mgr->reserved_pages); + + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); + ttm_resource_manager_set_used(man, true); + return 0; +} + +/** + * amdgpu_vram_mgr_fini - free and destroy VRAM manager + * + * @adev: amdgpu_device pointer + * + * Destroy and free the VRAM manager, returns -EBUSY if ranges are still + * allocated inside it. 
+ */ +void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct ttm_resource_manager *man = &mgr->manager; + int ret; + struct amdgpu_vram_reservation *rsv, *temp; + + ttm_resource_manager_set_used(man, false); + + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + if (ret) + return; + + spin_lock(&mgr->lock); + list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) + kfree(rsv); + + list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) { + drm_mm_remove_node(&rsv->mm_node); + kfree(rsv); + } + drm_mm_takedown(&mgr->mm); + spin_unlock(&mgr->lock); + + ttm_resource_manager_cleanup(man); + ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 1a8f6d4baab2..a434c71fde8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -56,6 +56,8 @@ #define AMD_SRIOV_MSG_RESERVE_UCODE 24 +#define AMD_SRIOV_MSG_RESERVE_VCN_INST 4 + enum amd_sriov_ucode_engine_id { AMD_SRIOV_UCODE_ID_VCE = 0, AMD_SRIOV_UCODE_ID_UVD, @@ -98,10 +100,10 @@ union amd_sriov_msg_feature_flags { union amd_sriov_reg_access_flags { struct { - uint32_t vf_reg_access_ih : 1; + uint32_t vf_reg_access_ih : 1; uint32_t vf_reg_access_mmhub : 1; - uint32_t vf_reg_access_gc : 1; - uint32_t reserved : 29; + uint32_t vf_reg_access_gc : 1; + uint32_t reserved : 29; } flags; uint32_t all; }; @@ -114,6 +116,37 @@ union amd_sriov_msg_os_info { uint32_t all; }; +struct amd_sriov_msg_uuid_info { + union { + struct { + uint32_t did : 16; + uint32_t fcn : 8; + uint32_t asic_7 : 8; + }; + uint32_t time_low; + }; + + struct { + uint32_t time_mid : 16; + uint32_t time_high : 12; + uint32_t version : 4; + }; + + struct { + struct { + uint8_t clk_seq_hi : 6; + uint8_t variant : 2; + }; + union { + uint8_t clk_seq_low; + uint8_t asic_6; + }; + uint16_t asic_4; + }; + + uint32_t asic_0; +}; + struct amd_sriov_msg_pf2vf_info_header { /* the total structure size in byte */ uint32_t size; @@ -160,10 +193,19 @@ struct amd_sriov_msg_pf2vf_info { /* identification in ROCm SMI */ uint64_t uuid; uint32_t fcn_idx; - /* flags which indicate the register access method VF should use */ + /* flags to indicate which register access method VF should use */ union amd_sriov_reg_access_flags reg_access_flags; + /* MM BW management */ + struct { + uint32_t decode_max_dimension_pixels; + uint32_t decode_max_frame_pixels; + uint32_t encode_max_dimension_pixels; + uint32_t encode_max_frame_pixels; + } mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST]; + /* UUID info */ + struct amd_sriov_msg_uuid_info uuid_info; /* reserved */ - uint32_t reserved[256-27]; + uint32_t reserved[256 - 47]; }; struct amd_sriov_msg_vf2pf_info_header { diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c index 2ac4988ea0ff..c12c2900732b 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c @@ -74,6 +74,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev, case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE); break; diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c index 3dcb8b32f48b..6fa2229b7229 100644 
--- a/drivers/gpu/drm/amd/amdgpu/atom.c +++ b/drivers/gpu/drm/amd/amdgpu/atom.c @@ -31,6 +31,7 @@ #define ATOM_DEBUG +#include "atomfirmware.h" #include "atom.h" #include "atom-names.h" #include "atom-bits.h" @@ -1299,12 +1300,168 @@ static void atom_index_iio(struct atom_context *ctx, int base) } } +static void atom_get_vbios_name(struct atom_context *ctx) +{ + unsigned char *p_rom; + unsigned char str_num; + unsigned short off_to_vbios_str; + unsigned char *c_ptr; + int name_size; + int i; + + const char *na = "--N/A--"; + char *back; + + p_rom = ctx->bios; + + str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS); + if (str_num != 0) { + off_to_vbios_str = + *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START); + + c_ptr = (unsigned char *)(p_rom + off_to_vbios_str); + } else { + /* do not know where to find name */ + memcpy(ctx->name, na, 7); + ctx->name[7] = 0; + return; + } + + /* + * skip the atombios strings, usually 4 + * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type + */ + for (i = 0; i < str_num; i++) { + while (*c_ptr != 0) + c_ptr++; + c_ptr++; + } + + /* skip the following 2 chars: 0x0D 0x0A */ + c_ptr += 2; + + name_size = strnlen(c_ptr, STRLEN_LONG - 1); + memcpy(ctx->name, c_ptr, name_size); + back = ctx->name + name_size; + while ((*--back) == ' ') + ; + *(back + 1) = '\0'; +} + +static void atom_get_vbios_date(struct atom_context *ctx) +{ + unsigned char *p_rom; + unsigned char *date_in_rom; + + p_rom = ctx->bios; + + date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE; + + ctx->date[0] = '2'; + ctx->date[1] = '0'; + ctx->date[2] = date_in_rom[6]; + ctx->date[3] = date_in_rom[7]; + ctx->date[4] = '/'; + ctx->date[5] = date_in_rom[0]; + ctx->date[6] = date_in_rom[1]; + ctx->date[7] = '/'; + ctx->date[8] = date_in_rom[3]; + ctx->date[9] = date_in_rom[4]; + ctx->date[10] = ' '; + ctx->date[11] = date_in_rom[9]; + ctx->date[12] = date_in_rom[10]; + ctx->date[13] = date_in_rom[11]; + ctx->date[14] = date_in_rom[12]; + ctx->date[15] = date_in_rom[13]; + ctx->date[16] = '\0'; +} + +static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start, + int end, int maxlen) +{ + unsigned long str_off; + unsigned char *p_rom; + unsigned short str_len; + + str_off = 0; + str_len = strnlen(str, maxlen); + p_rom = ctx->bios; + + for (; start <= end; ++start) { + for (str_off = 0; str_off < str_len; ++str_off) { + if (str[str_off] != *(p_rom + start + str_off)) + break; + } + + if (str_off == str_len || str[str_off] == 0) + return p_rom + start; + } + return NULL; +} + +static void atom_get_vbios_pn(struct atom_context *ctx) +{ + unsigned char *p_rom; + unsigned short off_to_vbios_str; + unsigned char *vbios_str; + int count; + + off_to_vbios_str = 0; + p_rom = ctx->bios; + + if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) { + off_to_vbios_str = + *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START); + + vbios_str = (unsigned char *)(p_rom + off_to_vbios_str); + } else { + vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER; + } + + if (*vbios_str == 0) { + vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64); + if (vbios_str == NULL) + vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1; + } + if (vbios_str != NULL && *vbios_str == 0) + vbios_str++; + + if (vbios_str != NULL) { + count = 0; + while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' && + vbios_str[count] <= 'z') { + ctx->vbios_pn[count] = vbios_str[count]; + count++; + } + + ctx->vbios_pn[count] = 0; + } +} + +static void 
atom_get_vbios_version(struct atom_context *ctx) +{ + unsigned char *vbios_ver; + + /* find anchor ATOMBIOSBK-AMD */ + vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64); + if (vbios_ver != NULL) { + /* skip ATOMBIOSBK-AMD VER */ + vbios_ver += 18; + memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL); + } else { + ctx->vbios_ver_str[0] = '\0'; + } +} + struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) { int base; struct atom_context *ctx = kzalloc(sizeof(struct atom_context), GFP_KERNEL); char *str; + struct _ATOM_ROM_HEADER *atom_rom_header; + struct _ATOM_MASTER_DATA_TABLE *master_table; + struct _ATOM_FIRMWARE_INFO *atom_fw_info; u16 idx; if (!ctx) @@ -1353,6 +1510,21 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios) strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version)); } + atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base); + if (atom_rom_header->usMasterDataTableOffset != 0) { + master_table = (struct _ATOM_MASTER_DATA_TABLE *) + CSTR(atom_rom_header->usMasterDataTableOffset); + if (master_table->ListOfDataTables.FirmwareInfo != 0) { + atom_fw_info = (struct _ATOM_FIRMWARE_INFO *) + CSTR(master_table->ListOfDataTables.FirmwareInfo); + ctx->version = atom_fw_info->ulFirmwareRevision; + } + } + + atom_get_vbios_name(ctx); + atom_get_vbios_pn(ctx); + atom_get_vbios_date(ctx); + atom_get_vbios_version(ctx); return ctx; } diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h index d279759cab47..0c1839824520 100644 --- a/drivers/gpu/drm/amd/amdgpu/atom.h +++ b/drivers/gpu/drm/amd/amdgpu/atom.h @@ -112,6 +112,10 @@ struct drm_device; #define ATOM_IO_SYSIO 2 #define ATOM_IO_IIO 0x80 +#define STRLEN_NORMAL 32 +#define STRLEN_LONG 64 +#define STRLEN_VERYLONG 254 + struct card_info { struct drm_device *dev; void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ @@ -140,6 +144,12 @@ struct atom_context { uint32_t *scratch; int scratch_size_bytes; char vbios_version[20]; + + uint8_t name[STRLEN_LONG]; + uint8_t vbios_pn[STRLEN_LONG]; + uint32_t version; + uint8_t vbios_ver_str[STRLEN_NORMAL]; + uint8_t date[STRLEN_NORMAL]; }; extern int amdgpu_atom_debug; diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index a3ba9ca11e98..f327becb022f 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -188,6 +188,8 @@ void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector) { amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd; amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer; + amdgpu_connector->ddc_bus->aux.drm_dev = amdgpu_connector->base.dev; + drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux); amdgpu_connector->ddc_bus->has_aux = true; } @@ -610,7 +612,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i dp_info->tries = 0; voltage = 0xff; while (1) { - drm_dp_link_train_clock_recovery_delay(dp_info->dpcd); + drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, dp_info->link_status) <= 0) { @@ -675,7 +677,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i dp_info->tries = 0; channel_eq = false; while (1) { - drm_dp_link_train_channel_eq_delay(dp_info->dpcd); + drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, 
dp_info->link_status) <= 0) { diff --git a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c b/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c new file mode 100644 index 000000000000..608a113ce354 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c @@ -0,0 +1,54 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "nv.h" + +#include "soc15_common.h" +#include "soc15_hw_ip.h" +#include "beige_goby_ip_offset.h" + +int beige_goby_reg_base_init(struct amdgpu_device *adev) +{ + /* HW has more IP blocks, only initialize the block needed by driver */ + uint32_t i; + for (i = 0 ; i < MAX_INSTANCE ; ++i) { + adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); + adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); + adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); + adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); + adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); + adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN0_BASE.instance[i])); + adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); + adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i])); + adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); + adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); + adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + } + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index d3745711d55f..df385ffc9768 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -309,8 +309,7 @@ static int cik_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; diff --git 
a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index c4bb8eed246d..c8ebd108548d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -720,7 +720,7 @@ err0: } /** - * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART + * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -746,7 +746,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib, } /** - * cik_sdma_vm_write_pages - update PTEs by writing them manually + * cik_sdma_vm_write_pte - update PTEs by writing them manually * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -775,7 +775,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, } /** - * cik_sdma_vm_set_pages - update the page tables using sDMA + * cik_sdma_vm_set_pte_pde - update the page tables using sDMA * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -804,7 +804,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe, } /** - * cik_sdma_vm_pad_ib - pad the IB to the required number of dw + * cik_sdma_ring_pad_ib - pad the IB to the required number of dw * * @ring: amdgpu_ring structure holding ring information * @ib: indirect buffer to fill with padding diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 307c01301c87..b8c47e0cf37a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -301,8 +301,7 @@ static int cz_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index dbcb09cf83e6..c7803dc2b2d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -456,7 +456,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder) } /** - * cik_get_number_of_dram_channels - get the number of dram channels + * si_get_number_of_dram_channels - get the number of dram channels * * @adev: amdgpu_device pointer * diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 5c11144da051..33324427b555 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -421,6 +421,11 @@ static int dce_virtual_sw_init(void *handle) static int dce_virtual_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i = 0; + + for (i = 0; i < adev->mode_info.num_crtc; i++) + if (adev->mode_info.crtcs[i]) + hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); kfree(adev->mode_info.bios_hardcoded_edid); @@ -480,13 +485,6 @@ static int dce_virtual_hw_init(void *handle) static int dce_virtual_hw_fini(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i = 0; - - for (i = 0; i<adev->mode_info.num_crtc; i++) - if (adev->mode_info.crtcs[i]) - hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 0d8459d63bac..14514a145c17 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -219,11 +219,11 @@ static void df_v3_6_query_hashes(struct 
amdgpu_device *adev) adev->df.hash_status.hash_2m = false; adev->df.hash_status.hash_1g = false; - if (adev->asic_type != CHIP_ARCTURUS) - return; - - /* encoding for hash-enabled on Arcturus */ - if (adev->df.funcs->get_fb_channel_number(adev) == 0xe) { + /* encoding for hash-enabled on Arcturus and Aldebaran */ + if ((adev->asic_type == CHIP_ARCTURUS && + adev->df.funcs->get_fb_channel_number(adev) == 0xe) || + (adev->asic_type == CHIP_ALDEBARAN && + adev->df.funcs->get_fb_channel_number(adev) == 0x1e)) { tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DfGlobalCtrl); adev->df.hash_status.hash_64k = REG_GET_FIELD(tmp, DF_CS_UMC_AON0_DfGlobalCtrl, @@ -277,8 +277,14 @@ static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev) { u32 tmp; - tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0); - tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + if (adev->asic_type == CHIP_ALDEBARAN) { + tmp = RREG32_SOC15(DF, 0, mmDF_GCM_AON0_DramMegaBaseAddress0); + tmp &= + ALDEBARAN_DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + } else { + tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0); + tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + } tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; return tmp; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 0597aeb5f0e8..2d56b60bc058 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -47,7 +47,7 @@ #include "gfx_v10_0.h" #include "nbio_v2_3.h" -/** +/* * Navi10 has two graphic rings to share each graphic pipe. * 1. Primary ring * 2. Async ring @@ -235,6 +235,20 @@ MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec.bin"); MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_mec2.bin"); MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_rlc.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_ce.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_pfp.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_me.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_mec.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_mec2.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_rlc.bin"); + +MODULE_FIRMWARE("amdgpu/yellow_carp_ce.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_pfp.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_me.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_mec.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_mec2.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_rlc.bin"); + static const struct soc15_reg_golden golden_settings_gc_10_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), @@ -1428,38 +1442,36 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000) }; -static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write) -{ - /* always programed by rlcg, only for gc */ - if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) || - offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) || - offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) || - offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) || - offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) || - offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) { - if (!amdgpu_sriov_reg_indirect_gc(adev)) - *flag = GFX_RLCG_GC_WRITE_OLD; - else - *flag = write ? 
GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ; +static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, + int write, u32 *rlcg_flag) +{ + switch (hwip) { + case GC_HWIP: + if (amdgpu_sriov_reg_indirect_gc(adev)) { + *rlcg_flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ; - return true; - } + return true; + /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */ + } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) { + *rlcg_flag = GFX_RLCG_GC_WRITE_OLD; - /* currently support gc read/write, mmhub write */ - if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) && - offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) { - if (amdgpu_sriov_reg_indirect_gc(adev)) - *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ; - else - return false; - } else { - if (amdgpu_sriov_reg_indirect_mmhub(adev)) - *flag = GFX_RLCG_MMHUB_WRITE; - else - return false; + return true; + } + + break; + case MMHUB_HWIP: + if (amdgpu_sriov_reg_indirect_mmhub(adev) && + (acc_flags & AMDGPU_REGS_RLC) && write) { + *rlcg_flag = GFX_RLCG_MMHUB_WRITE; + return true; + } + + break; + default: + DRM_DEBUG("Not program register by RLCG\n"); } - return true; + return false; } static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag) @@ -1526,36 +1538,34 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32 return ret; } -static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag) +static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip) { - uint32_t rlcg_flag; + u32 rlcg_flag; - if (amdgpu_sriov_fullaccess(adev) && - gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) { + if (!amdgpu_sriov_runtime(adev) && + gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) { gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag); - return; } - if (flag & AMDGPU_REGS_NO_KIQ) + + if (acc_flags & AMDGPU_REGS_NO_KIQ) WREG32_NO_KIQ(offset, value); else WREG32(offset, value); } -static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag) +static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip) { - uint32_t rlcg_flag; + u32 rlcg_flag; - if (amdgpu_sriov_fullaccess(adev) && - gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0)) + if (!amdgpu_sriov_runtime(adev) && + gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag)) return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag); - if (flag & AMDGPU_REGS_NO_KIQ) + if (acc_flags & AMDGPU_REGS_NO_KIQ) return RREG32_NO_KIQ(offset); else return RREG32(offset); - - return 0; } static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = @@ -3377,6 +3387,30 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020), }; +static const struct soc15_reg_golden golden_settings_gc_10_3_3[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0xff7f0fff, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCH_PIPE_STEER, 0x000000ff, 0x000000e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c200), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xfffffff3), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_TIMEOUT_COUNTER, 0xffffffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0xffffffbf, 0x00000820), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00100000) +}; + static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), @@ -3416,6 +3450,41 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_4[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x00000020, 0x00000020) }; +static const struct soc15_reg_golden golden_settings_gc_10_3_5[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CS_CLK_CTRL, 0x78000000, 0x78000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xb0000ff0, 0x30000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff000000, 0x7e000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CM_CTRL1, 0xff8fff0f, 0x580f1008), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2C_CTRL3, 0xf7ffffff, 0x00f80988), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmLDS_CONFIG, 0x000001ff, 0x00000020), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_CL_ENHANCE, 0xf17fffff, 0x01200007), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xe07df47f, 0x00180070), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER0_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER1_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER10_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER11_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER12_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER13_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER14_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER15_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER2_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER3_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER4_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER5_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER6_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX,0xfff7ffff, 0x01030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 0x00a00000) +}; + #define DEFAULT_SH_MEM_CONFIG \ ((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \ @@ -3631,11 +3700,21 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_10_3_vangogh, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_vangogh)); break; + case CHIP_YELLOW_CARP: + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_3, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_3)); + break; case CHIP_DIMGREY_CAVEFISH: soc15_program_register_sequence(adev, golden_settings_gc_10_3_4, (const u32)ARRAY_SIZE(golden_settings_gc_10_3_4)); break; + case CHIP_BEIGE_GOBY: + soc15_program_register_sequence(adev, + golden_settings_gc_10_3_5, + (const u32)ARRAY_SIZE(golden_settings_gc_10_3_5)); + break; default: break; } @@ -3821,6 +3900,8 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->gfx.cp_fw_write_wait = true; break; default: @@ -3897,7 +3978,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; char fw_name[40]; - char wks[10]; + char *wks = ""; int err; struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; @@ -3910,7 +3991,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); - memset(wks, 0, sizeof(wks)); switch (adev->asic_type) { case CHIP_NAVI10: chip_name = "navi10"; @@ -3919,7 +3999,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) chip_name = "navi14"; if (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)) - snprintf(wks, sizeof(wks), "_wks"); + wks = "_wks"; break; case CHIP_NAVI12: chip_name = "navi12"; @@ -3936,6 +4016,12 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) case CHIP_DIMGREY_CAVEFISH: chip_name = "dimgrey_cavefish"; break; + case CHIP_BEIGE_GOBY: + chip_name = "beige_goby"; + break; + case CHIP_YELLOW_CARP: + chip_name = "yellow_carp"; + break; default: BUG(); } @@ -4504,6 +4590,8 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -4628,6 +4716,8 @@ static int gfx_v10_0_sw_init(void *handle) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; @@ -4860,7 +4950,8 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev) for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { bitmap = i * adev->gfx.config.max_sh_per_se + j; - if ((adev->asic_type == CHIP_SIENNA_CICHLID) && + if (((adev->asic_type == CHIP_SIENNA_CICHLID) || + (adev->asic_type == CHIP_YELLOW_CARP)) && 
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff); @@ -5190,10 +5281,10 @@ static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev) uint32_t tmp; /* enable Save Restore Machine */ - tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); + tmp = RREG32_SOC15(GC, 0, mmRLC_SRM_CNTL); tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK; tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK; - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp); + WREG32_SOC15(GC, 0, mmRLC_SRM_CNTL, tmp); } static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev) @@ -6135,6 +6226,8 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev, case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index); WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp); @@ -6270,6 +6363,8 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0); break; default: @@ -6282,6 +6377,8 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); @@ -6378,6 +6475,8 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid); tmp &= 0xffffff00; tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); @@ -7092,6 +7191,7 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid); WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0); WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern); @@ -7105,6 +7205,7 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev) } break; case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: return true; default: data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE); @@ -7126,6 +7227,9 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) { uint32_t data; + if (amdgpu_sriov_vf(adev)) + return; + /* initialize cam_index to 0 * index will auto-inc after each data writting */ WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0); @@ -7135,6 +7239,8 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: /* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */ data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) << GRBM_CAM_DATA__CAM_ADDR__SHIFT) | @@ -7344,7 +7450,7 @@ static int gfx_v10_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - if (!adev->in_pci_err_recovery) { + if (!adev->no_hw_access) { #ifndef BRING_UP_DEBUG if (amdgpu_async_gfx_ring) { r = gfx_v10_0_kiq_disable_kgq(adev); @@ -7450,6 +7556,8 @@ static int 
gfx_v10_0_soft_reset(void *handle) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid)) grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, @@ -7503,6 +7611,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) mutex_lock(&adev->gfx.gpu_clock_mutex); switch (adev->asic_type) { case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); break; @@ -7559,6 +7668,8 @@ static int gfx_v10_0_early_init(void *handle) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid; break; default: @@ -7615,6 +7726,8 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data); /* wait for RLC_SAFE_MODE */ @@ -7649,6 +7762,8 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data); break; default: @@ -7891,12 +8006,12 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev, static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid) { u32 reg, data; - + /* not for *_SOC15 */ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL); if (amdgpu_sriov_is_pp_one_vf(adev)) data = RREG32_NO_KIQ(reg); else - data = RREG32(reg); + data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL); data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK; data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT; @@ -7951,12 +8066,23 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) * in refclk count. Note that RLC FW is modified to take 16 bits from * RLC_PG_DELAY_3[15:0] as the hysteresis instead of just 8 bits. * - * The recommendation from RLC team is setting RLC_PG_DELAY_3 to 200us(0x4E20) - * as part of CGPG enablement starting point. + * The recommendation from RLC team is setting RLC_PG_DELAY_3 to 200us as part) + * of CGPG enablement starting point. + * Power/performance team will optimize it and might give a new value later. 
*/ - if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && adev->asic_type == CHIP_VANGOGH) { - data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; - WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { + switch (adev->asic_type) { + case CHIP_VANGOGH: + data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); + break; + case CHIP_YELLOW_CARP: + data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); + break; + default: + break; + } } } @@ -8016,9 +8142,11 @@ static int gfx_v10_0_set_powergating_state(void *handle, case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: amdgpu_gfx_off_ctrl(adev, enable); break; case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: gfx_v10_cntl_pg(adev, enable); amdgpu_gfx_off_ctrl(adev, enable); break; @@ -8044,6 +8172,8 @@ static int gfx_v10_0_set_clockgating_state(void *handle, case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: gfx_v10_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; @@ -8636,16 +8766,16 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - cp_int_cntl = RREG32(cp_int_cntl_reg); + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, 0); - WREG32(cp_int_cntl_reg, cp_int_cntl); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: - cp_int_cntl = RREG32(cp_int_cntl_reg); + cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg); cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, TIME_STAMP_INT_ENABLE, 1); - WREG32(cp_int_cntl_reg, cp_int_cntl); + WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl); break; default: break; @@ -8689,16 +8819,16 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev switch (state) { case AMDGPU_IRQ_STATE_DISABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 0); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); break; case AMDGPU_IRQ_STATE_ENABLE: - mec_int_cntl = RREG32(mec_int_cntl_reg); + mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg); mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, TIME_STAMP_INT_ENABLE, 1); - WREG32(mec_int_cntl_reg, mec_int_cntl); + WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl); break; default: break; @@ -8894,20 +9024,20 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev, GENERIC2_INT_ENABLE, 0); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); - tmp = RREG32(target); + tmp = RREG32_SOC15_IP(GC, target); tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, GENERIC2_INT_ENABLE, 0); - WREG32(target, tmp); + WREG32_SOC15_IP(GC, target, tmp); } else { tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, GENERIC2_INT_ENABLE, 1); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); - tmp = RREG32(target); + tmp = RREG32_SOC15_IP(GC, target); tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, GENERIC2_INT_ENABLE, 1); - WREG32(target, tmp); + WREG32_SOC15_IP(GC, target, tmp); } break; default: @@ -9150,13 +9280,15 @@ static void 
gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs; break; case CHIP_NAVI12: + case CHIP_SIENNA_CICHLID: adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov; break; default: @@ -9239,7 +9371,8 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev, for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { bitmap = i * adev->gfx.config.max_sh_per_se + j; - if ((adev->asic_type == CHIP_SIENNA_CICHLID) && + if (((adev->asic_type == CHIP_SIENNA_CICHLID) || + (adev->asic_type == CHIP_YELLOW_CARP)) && ((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1)) continue; mask = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index c35fdd2ef2d4..685212c3ddae 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2116,7 +2116,7 @@ error_free_scratch: } /** - * gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp + * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp * * @ring: amdgpu_ring structure holding ring information * @@ -2242,7 +2242,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring, * IB stuff */ /** - * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring + * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring * * @ring: amdgpu_ring structure holding ring information * @job: job to retrieve vmid from @@ -3196,7 +3196,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev) } /** - * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP + * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP * * @ring: the ring to emit the commands to * diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 516467e962b7..044076ec1d03 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -734,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, }; -static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) +static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) { static void *scratch_reg0; static void *scratch_reg1; @@ -787,15 +787,17 @@ static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 } -static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) +static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, + u32 v, u32 acc_flags, u32 hwip) { - if (amdgpu_sriov_fullaccess(adev)) { - gfx_v9_0_rlcg_rw(adev, offset, v, flag); + if ((acc_flags & AMDGPU_REGS_RLC) && + amdgpu_sriov_fullaccess(adev)) { + gfx_v9_0_rlcg_w(adev, offset, v, acc_flags); return; } - if (flag & AMDGPU_REGS_NO_KIQ) + if (acc_flags & AMDGPU_REGS_NO_KIQ) WREG32_NO_KIQ(offset, v); else WREG32(offset, v); @@ -3937,7 +3939,8 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev) { u32 tmp; - if (adev->asic_type != CHIP_ARCTURUS) + if (adev->asic_type != CHIP_ARCTURUS && + adev->asic_type != CHIP_ALDEBARAN) return; tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG); @@ -4559,8 +4562,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev) if (!ring->sched.ready) 
return 0; - if (adev->asic_type == CHIP_ARCTURUS || - adev->asic_type == CHIP_ALDEBARAN) { + if (adev->asic_type == CHIP_ARCTURUS) { vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus; vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus); vgpr_init_regs_ptr = vgpr_init_regs_arcturus; @@ -4745,7 +4747,11 @@ static int gfx_v9_0_ecc_late_init(void *handle) } /* requires IBs so do in late init after IB pool is initialized */ - r = gfx_v9_0_do_edc_gpr_workarounds(adev); + if (adev->asic_type == CHIP_ALDEBARAN) + r = gfx_v9_4_2_do_edc_gpr_workarounds(adev); + else + r = gfx_v9_0_do_edc_gpr_workarounds(adev); + if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index a30c7c10cd9a..1769c4cba2ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -22,6 +22,7 @@ */ #include "amdgpu.h" #include "soc15.h" +#include "soc15d.h" #include "gc/gc_9_4_2_offset.h" #include "gc/gc_9_4_2_sh_mask.h" @@ -31,6 +32,11 @@ #include "amdgpu_ras.h" #include "amdgpu_gfx.h" +#define SE_ID_MAX 8 +#define CU_ID_MAX 16 +#define SIMD_ID_MAX 4 +#define WAVE_ID_MAX 10 + enum gfx_v9_4_2_utc_type { VML2_MEM, VML2_WALKER_MEM, @@ -79,6 +85,634 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20), }; +/** + * This shader is used to clear VGPRS and LDS, and also write the input + * pattern into the write back buffer, which will be used by driver to + * check whether all SIMDs have been covered. +*/ +static const u32 vgpr_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xd3d94000, + 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080, 0xd3d94003, + 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080, 0xd3d94006, + 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080, 0xd3d94009, + 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080, 0xd3d9400c, + 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080, 0xd3d9400f, + 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080, 0xd3d94012, + 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080, 0xd3d94015, + 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080, 0xd3d94018, + 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080, 0xd3d9401b, + 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080, 0xd3d9401e, + 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080, 0xd3d94021, + 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080, 0xd3d94024, + 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080, 0xd3d94027, + 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080, 0xd3d9402a, + 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080, 0xd3d9402d, + 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080, 0xd3d94030, + 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080, 0xd3d94033, + 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080, 0xd3d94036, + 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080, 0xd3d94039, + 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080, 0xd3d9403c, + 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080, 0xd3d9403f, + 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080, 0xd3d94042, + 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 
0x18000080, 0xd3d94045, + 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080, 0xd3d94048, + 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080, 0xd3d9404b, + 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080, 0xd3d9404e, + 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080, 0xd3d94051, + 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080, 0xd3d94054, + 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080, 0xd3d94057, + 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080, 0xd3d9405a, + 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080, 0xd3d9405d, + 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080, 0xd3d94060, + 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080, 0xd3d94063, + 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080, 0xd3d94066, + 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080, 0xd3d94069, + 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080, 0xd3d9406c, + 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080, 0xd3d9406f, + 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080, 0xd3d94072, + 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080, 0xd3d94075, + 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080, 0xd3d94078, + 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080, 0xd3d9407b, + 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080, 0xd3d9407e, + 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080, 0xd3d94081, + 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080, 0xd3d94084, + 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080, 0xd3d94087, + 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080, 0xd3d9408a, + 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080, 0xd3d9408d, + 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080, 0xd3d94090, + 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080, 0xd3d94093, + 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080, 0xd3d94096, + 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080, 0xd3d94099, + 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080, 0xd3d9409c, + 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080, 0xd3d9409f, + 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080, 0xd3d940a2, + 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080, 0xd3d940a5, + 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080, 0xd3d940a8, + 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080, 0xd3d940ab, + 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080, 0xd3d940ae, + 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080, 0xd3d940b1, + 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080, 0xd3d940b4, + 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080, 0xd3d940b7, + 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080, 0xd3d940ba, + 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080, 0xd3d940bd, + 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080, 0xd3d940c0, + 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080, 0xd3d940c3, + 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080, 0xd3d940c6, + 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080, 0xd3d940c9, + 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080, 0xd3d940cc, + 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080, 0xd3d940cf, + 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080, 0xd3d940d2, + 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 
0x18000080, 0xd3d940d5, + 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080, 0xd3d940d8, + 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080, 0xd3d940db, + 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080, 0xd3d940de, + 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080, 0xd3d940e1, + 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080, 0xd3d940e4, + 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080, 0xd3d940e7, + 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080, 0xd3d940ea, + 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080, 0xd3d940ed, + 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080, 0xd3d940f0, + 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080, 0xd3d940f3, + 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080, 0xd3d940f6, + 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080, 0xd3d940f9, + 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080, 0xd3d940fc, + 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080, 0xd3d940ff, + 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a, 0x7e000280, + 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280, 0x7e0c0280, + 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000, 0xd28c0001, + 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xbe8b0004, 0xb78b4000, + 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000, 0x00020201, + 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a, 0xbf84fff8, + 0xbf810000, +}; + +const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = { + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 4 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0xbf }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x400006 }, /* 64KB LDS */ + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x3F }, /* 63 - accum-offset = 256 */ + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +/** + * The below shaders are used to clear SGPRS, and also write the input + * pattern into the write back buffer. The first two dispatch should be + * scheduled simultaneously which make sure that all SGPRS could be + * allocated, so the dispatch 1 need check write back buffer before scheduled, + * make sure that waves of dispatch 0 are all dispacthed to all simds + * balanced. both dispatch 0 and dispatch 1 should be halted until all waves + * are dispatched, and then driver write a pattern to the shared memory to make + * all waves continue. 
+*/ +static const u32 sgpr112_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200, + 0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102, + 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080, + 0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080, + 0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080, + 0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080, + 0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080, + 0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080, + 0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080, + 0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080, + 0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080, + 0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080, + 0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080, + 0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080, + 0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080, + 0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080, + 0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080, + 0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080, + 0xbeda0080, 0xbedb0080, 0xbedc0080, 0xbedd0080, 0xbede0080, 0xbedf0080, + 0xbee00080, 0xbee10080, 0xbee20080, 0xbee30080, 0xbee40080, 0xbee50080, + 0xbf810000 +}; + +const struct soc15_reg_entry sgpr112_init_regs_aldebaran[] = { + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 8 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x340 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +static const u32 sgpr96_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200, + 0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102, + 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080, + 0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080, + 0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080, + 0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080, + 0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080, + 
0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080, + 0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080, + 0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080, + 0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080, + 0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080, + 0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbeba0080, 0xbebb0080, + 0xbebc0080, 0xbebd0080, 0xbebe0080, 0xbebf0080, 0xbec00080, 0xbec10080, + 0xbec20080, 0xbec30080, 0xbec40080, 0xbec50080, 0xbec60080, 0xbec70080, + 0xbec80080, 0xbec90080, 0xbeca0080, 0xbecb0080, 0xbecc0080, 0xbecd0080, + 0xbece0080, 0xbecf0080, 0xbed00080, 0xbed10080, 0xbed20080, 0xbed30080, + 0xbed40080, 0xbed50080, 0xbed60080, 0xbed70080, 0xbed80080, 0xbed90080, + 0xbf810000, +}; + +const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = { + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0xc }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x2c0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +/** + * This shader is used to clear the uninitiated sgprs after the above + * two dispatches, because of hardware feature, dispath 0 couldn't clear + * top hole sgprs. 
Therefore need 4 waves per SIMD to cover these sgprs +*/ +static const u32 sgpr64_init_compute_shader_aldebaran[] = { + 0xb8840904, 0xb8851a04, 0xb8861344, 0xb8831804, 0x9208ff06, 0x00000280, + 0x9209a805, 0x920a8a04, 0x81080908, 0x81080a08, 0x81080308, 0x8e078208, + 0x81078407, 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8e003f, 0xc0030200, + 0x00000000, 0xbf8c0000, 0xbf06ff08, 0xdeadbeaf, 0xbf84fff9, 0x81028102, + 0xc0410080, 0x00000007, 0xbf8c0000, 0xbf8a0000, 0xbefc0080, 0xbeea0080, + 0xbeeb0080, 0xbf00f280, 0xbee60080, 0xbee70080, 0xbee80080, 0xbee90080, + 0xbefe0080, 0xbeff0080, 0xbe880080, 0xbe890080, 0xbe8a0080, 0xbe8b0080, + 0xbe8c0080, 0xbe8d0080, 0xbe8e0080, 0xbe8f0080, 0xbe900080, 0xbe910080, + 0xbe920080, 0xbe930080, 0xbe940080, 0xbe950080, 0xbe960080, 0xbe970080, + 0xbe980080, 0xbe990080, 0xbe9a0080, 0xbe9b0080, 0xbe9c0080, 0xbe9d0080, + 0xbe9e0080, 0xbe9f0080, 0xbea00080, 0xbea10080, 0xbea20080, 0xbea30080, + 0xbea40080, 0xbea50080, 0xbea60080, 0xbea70080, 0xbea80080, 0xbea90080, + 0xbeaa0080, 0xbeab0080, 0xbeac0080, 0xbead0080, 0xbeae0080, 0xbeaf0080, + 0xbeb00080, 0xbeb10080, 0xbeb20080, 0xbeb30080, 0xbeb40080, 0xbeb50080, + 0xbeb60080, 0xbeb70080, 0xbeb80080, 0xbeb90080, 0xbf810000, +}; + +const struct soc15_reg_entry sgpr64_init_regs_aldebaran[] = { + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_X), 0x40 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Y), 0x10 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_NUM_THREAD_Z), 1 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC1), 0x1c0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC2), 0x6 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_PGM_RSRC3), 0x0 }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, + { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, +}; + +static int gfx_v9_4_2_run_shader(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_ib *ib, + const u32 *shader_ptr, u32 shader_size, + const struct soc15_reg_entry *init_regs, u32 regs_size, + u32 compute_dim_x, u64 wb_gpu_addr, u32 pattern, + struct dma_fence **fence_ptr) +{ + int r, i; + uint32_t total_size, shader_offset; + u64 gpu_addr; + + total_size = (regs_size * 3 + 4 + 5 + 5) * 4; + total_size = ALIGN(total_size, 256); + shader_offset = total_size; + total_size += ALIGN(shader_size, 256); + + /* allocate an indirect buffer to put the commands in */ + memset(ib, 0, sizeof(*ib)); + r = amdgpu_ib_get(adev, NULL, total_size, + AMDGPU_IB_POOL_DIRECT, ib); + if (r) { + dev_err(adev->dev, "failed to get ib (%d).\n", r); + return r; + } + + /* load the compute shaders */ + for (i = 0; i < shader_size/sizeof(u32); i++) + ib->ptr[i + (shader_offset / 4)] = shader_ptr[i]; + + /* init the ib length to 0 */ + ib->length_dw = 0; + + /* write the register state for the compute dispatch */ + for (i = 0; i < regs_size; i++) { + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); + ib->ptr[ib->length_dw++] = SOC15_REG_ENTRY_OFFSET(init_regs[i]) + - PACKET3_SET_SH_REG_START; + 
ib->ptr[ib->length_dw++] = init_regs[i].reg_value; + } + + /* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */ + gpu_addr = (ib->gpu_addr + (u64)shader_offset) >> 8; + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 2); + ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_PGM_LO) + - PACKET3_SET_SH_REG_START; + ib->ptr[ib->length_dw++] = lower_32_bits(gpu_addr); + ib->ptr[ib->length_dw++] = upper_32_bits(gpu_addr); + + /* write the wb buffer address */ + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_SET_SH_REG, 3); + ib->ptr[ib->length_dw++] = SOC15_REG_OFFSET(GC, 0, regCOMPUTE_USER_DATA_0) + - PACKET3_SET_SH_REG_START; + ib->ptr[ib->length_dw++] = lower_32_bits(wb_gpu_addr); + ib->ptr[ib->length_dw++] = upper_32_bits(wb_gpu_addr); + ib->ptr[ib->length_dw++] = pattern; + + /* write dispatch packet */ + ib->ptr[ib->length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); + ib->ptr[ib->length_dw++] = compute_dim_x; /* x */ + ib->ptr[ib->length_dw++] = 1; /* y */ + ib->ptr[ib->length_dw++] = 1; /* z */ + ib->ptr[ib->length_dw++] = + REG_SET_FIELD(0, COMPUTE_DISPATCH_INITIATOR, COMPUTE_SHADER_EN, 1); + + /* shedule the ib on the ring */ + r = amdgpu_ib_schedule(ring, 1, ib, NULL, fence_ptr); + if (r) { + dev_err(adev->dev, "ib submit failed (%d).\n", r); + amdgpu_ib_free(adev, ib, NULL); + } + return r; +} + +static void gfx_v9_4_2_log_wave_assignment(struct amdgpu_device *adev, uint32_t *wb_ptr) +{ + uint32_t se, cu, simd, wave; + uint32_t offset = 0; + char *str; + int size; + + str = kmalloc(256, GFP_KERNEL); + if (!str) + return; + + dev_dbg(adev->dev, "wave assignment:\n"); + + for (se = 0; se < adev->gfx.config.max_shader_engines; se++) { + for (cu = 0; cu < CU_ID_MAX; cu++) { + memset(str, 0, 256); + size = sprintf(str, "SE[%02d]CU[%02d]: ", se, cu); + for (simd = 0; simd < SIMD_ID_MAX; simd++) { + size += sprintf(str + size, "["); + for (wave = 0; wave < WAVE_ID_MAX; wave++) { + size += sprintf(str + size, "%x", wb_ptr[offset]); + offset++; + } + size += sprintf(str + size, "] "); + } + dev_dbg(adev->dev, "%s\n", str); + } + } + + kfree(str); +} + +static int gfx_v9_4_2_wait_for_waves_assigned(struct amdgpu_device *adev, + uint32_t *wb_ptr, uint32_t mask, + uint32_t pattern, uint32_t num_wave, bool wait) +{ + uint32_t se, cu, simd, wave; + uint32_t loop = 0; + uint32_t wave_cnt; + uint32_t offset; + + do { + wave_cnt = 0; + offset = 0; + + for (se = 0; se < adev->gfx.config.max_shader_engines; se++) + for (cu = 0; cu < CU_ID_MAX; cu++) + for (simd = 0; simd < SIMD_ID_MAX; simd++) + for (wave = 0; wave < WAVE_ID_MAX; wave++) { + if (((1 << wave) & mask) && + (wb_ptr[offset] == pattern)) + wave_cnt++; + + offset++; + } + + if (wave_cnt == num_wave) + return 0; + + mdelay(1); + } while (++loop < 2000 && wait); + + dev_err(adev->dev, "actual wave num: %d, expected wave num: %d\n", + wave_cnt, num_wave); + + gfx_v9_4_2_log_wave_assignment(adev, wb_ptr); + + return -EBADSLT; +} + +static int gfx_v9_4_2_do_sgprs_init(struct amdgpu_device *adev) +{ + int r; + int wb_size = adev->gfx.config.max_shader_engines * + CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX; + struct amdgpu_ib wb_ib; + struct amdgpu_ib disp_ibs[3]; + struct dma_fence *fences[3]; + u32 pattern[3] = { 0x1, 0x5, 0xa }; + + /* bail if the compute ring is not ready */ + if (!adev->gfx.compute_ring[0].sched.ready || + !adev->gfx.compute_ring[1].sched.ready) + return 0; + + /* allocate the write-back buffer from IB */ + memset(&wb_ib, 0, sizeof(wb_ib)); + r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * 
sizeof(uint32_t), + AMDGPU_IB_POOL_DIRECT, &wb_ib); + if (r) { + dev_err(adev->dev, "failed to get ib (%d) for wb\n", r); + return r; + } + memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t)); + + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[0], + &disp_ibs[0], + sgpr112_init_compute_shader_aldebaran, + sizeof(sgpr112_init_compute_shader_aldebaran), + sgpr112_init_regs_aldebaran, + ARRAY_SIZE(sgpr112_init_regs_aldebaran), + adev->gfx.cu_info.number, + wb_ib.gpu_addr, pattern[0], &fences[0]); + if (r) { + dev_err(adev->dev, "failed to clear first 224 sgprs\n"); + goto pro_end; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b11, + pattern[0], + adev->gfx.cu_info.number * SIMD_ID_MAX * 2, + true); + if (r) { + dev_err(adev->dev, "wave coverage failed when clear first 224 sgprs\n"); + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + goto disp0_failed; + } + + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[1], + &disp_ibs[1], + sgpr96_init_compute_shader_aldebaran, + sizeof(sgpr96_init_compute_shader_aldebaran), + sgpr96_init_regs_aldebaran, + ARRAY_SIZE(sgpr96_init_regs_aldebaran), + adev->gfx.cu_info.number * 2, + wb_ib.gpu_addr, pattern[1], &fences[1]); + if (r) { + dev_err(adev->dev, "failed to clear next 576 sgprs\n"); + goto disp0_failed; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b11111100, + pattern[1], adev->gfx.cu_info.number * SIMD_ID_MAX * 6, + true); + if (r) { + dev_err(adev->dev, "wave coverage failed when clear first 576 sgprs\n"); + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + goto disp1_failed; + } + + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + + /* wait for the GPU to finish processing the IB */ + r = dma_fence_wait(fences[0], false); + if (r) { + dev_err(adev->dev, "timeout to clear first 224 sgprs\n"); + goto disp1_failed; + } + + r = dma_fence_wait(fences[1], false); + if (r) { + dev_err(adev->dev, "timeout to clear first 576 sgprs\n"); + goto disp1_failed; + } + + memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t)); + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[0], + &disp_ibs[2], + sgpr64_init_compute_shader_aldebaran, + sizeof(sgpr64_init_compute_shader_aldebaran), + sgpr64_init_regs_aldebaran, + ARRAY_SIZE(sgpr64_init_regs_aldebaran), + adev->gfx.cu_info.number, + wb_ib.gpu_addr, pattern[2], &fences[2]); + if (r) { + dev_err(adev->dev, "failed to clear first 256 sgprs\n"); + goto disp1_failed; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b1111, + pattern[2], + adev->gfx.cu_info.number * SIMD_ID_MAX * 4, + true); + if (r) { + dev_err(adev->dev, "wave coverage failed when clear first 256 sgprs\n"); + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + goto disp2_failed; + } + + wb_ib.ptr[0] = 0xdeadbeaf; /* stop waves */ + + r = dma_fence_wait(fences[2], false); + if (r) { + dev_err(adev->dev, "timeout to clear first 256 sgprs\n"); + goto disp2_failed; + } + +disp2_failed: + amdgpu_ib_free(adev, &disp_ibs[2], NULL); + dma_fence_put(fences[2]); +disp1_failed: + amdgpu_ib_free(adev, &disp_ibs[1], NULL); + dma_fence_put(fences[1]); +disp0_failed: + amdgpu_ib_free(adev, &disp_ibs[0], NULL); + dma_fence_put(fences[0]); +pro_end: + amdgpu_ib_free(adev, &wb_ib, NULL); + + if (r) + dev_info(adev->dev, "Init SGPRS Failed\n"); + else + dev_info(adev->dev, "Init SGPRS Successfully\n"); + + return r; +} + +static int gfx_v9_4_2_do_vgprs_init(struct amdgpu_device *adev) +{ + int r; + /* CU_ID: 0~15, SIMD_ID: 0~3, WAVE_ID: 0 ~ 9 */ + int wb_size = 
adev->gfx.config.max_shader_engines * + CU_ID_MAX * SIMD_ID_MAX * WAVE_ID_MAX; + struct amdgpu_ib wb_ib; + struct amdgpu_ib disp_ib; + struct dma_fence *fence; + u32 pattern = 0xa; + + /* bail if the compute ring is not ready */ + if (!adev->gfx.compute_ring[0].sched.ready) + return 0; + + /* allocate the write-back buffer from IB */ + memset(&wb_ib, 0, sizeof(wb_ib)); + r = amdgpu_ib_get(adev, NULL, (1 + wb_size) * sizeof(uint32_t), + AMDGPU_IB_POOL_DIRECT, &wb_ib); + if (r) { + dev_err(adev->dev, "failed to get ib (%d) for wb.\n", r); + return r; + } + memset(wb_ib.ptr, 0, (1 + wb_size) * sizeof(uint32_t)); + + r = gfx_v9_4_2_run_shader(adev, + &adev->gfx.compute_ring[0], + &disp_ib, + vgpr_init_compute_shader_aldebaran, + sizeof(vgpr_init_compute_shader_aldebaran), + vgpr_init_regs_aldebaran, + ARRAY_SIZE(vgpr_init_regs_aldebaran), + adev->gfx.cu_info.number, + wb_ib.gpu_addr, pattern, &fence); + if (r) { + dev_err(adev->dev, "failed to clear vgprs\n"); + goto pro_end; + } + + /* wait for the GPU to finish processing the IB */ + r = dma_fence_wait(fence, false); + if (r) { + dev_err(adev->dev, "timeout to clear vgprs\n"); + goto disp_failed; + } + + r = gfx_v9_4_2_wait_for_waves_assigned(adev, + &wb_ib.ptr[1], 0b1, + pattern, + adev->gfx.cu_info.number * SIMD_ID_MAX, + false); + if (r) { + dev_err(adev->dev, "failed to cover all simds when clearing vgprs\n"); + goto disp_failed; + } + +disp_failed: + amdgpu_ib_free(adev, &disp_ib, NULL); + dma_fence_put(fence); +pro_end: + amdgpu_ib_free(adev, &wb_ib, NULL); + + if (r) + dev_info(adev->dev, "Init VGPRS Failed\n"); + else + dev_info(adev->dev, "Init VGPRS Successfully\n"); + + return r; +} + +int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev) +{ + /* only support when RAS is enabled */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return 0; + + gfx_v9_4_2_do_sgprs_init(adev); + + gfx_v9_4_2_do_vgprs_init(adev); + + return 0; +} + static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev); static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev); @@ -148,11 +782,6 @@ void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL1, PWRBRK_STALL_EN, 1); WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL1, tmp); - WREG32_SOC15(GC, 0, regDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL); - tmp = 0; - tmp = REG_SET_FIELD(tmp, DIDT_SQ_THROTTLE_CTRL, PWRBRK_STALL_EN, 1); - WREG32_SOC15(GC, 0, regDIDT_IND_DATA, tmp); - WREG32_SOC15(GC, 0, regGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL); tmp = 0; tmp = REG_SET_FIELD(tmp, PWRBRK_STALL_PATTERN_CTRL, PWRBRK_END_STEP, 0x12); @@ -808,8 +1437,9 @@ static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = { REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) }, }; -static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs = - { SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 }; +static const struct soc15_reg_entry gfx_v9_4_2_ea_err_status_regs = { + SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 +}; static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev, const struct soc15_reg_entry *reg, @@ -1006,8 +1636,8 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev, return 0; } -int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, - void *ras_error_status) +static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; uint32_t 
sec_count = 0, ded_count = 0; @@ -1039,20 +1669,24 @@ static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev) static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev) { uint32_t i, j; + uint32_t value; mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) { for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance; j++) { gfx_v9_4_2_select_se_sh(adev, i, 0, j); - WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10); + value = RREG32(SOC15_REG_ENTRY_OFFSET( + gfx_v9_4_2_ea_err_status_regs)); + value = REG_SET_FIELD(value, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1); + WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), value); } } gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); } -void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) +static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) { if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return; @@ -1061,7 +1695,7 @@ void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev) gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL); } -int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if) +static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if) { struct ras_inject_if *info = (struct ras_inject_if *)inject_if; int ret; @@ -1096,6 +1730,7 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev) gfx_v9_4_2_select_se_sh(adev, i, 0, j); reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( gfx_v9_4_2_ea_err_status_regs)); + if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) || REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) || REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) { @@ -1103,7 +1738,9 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev) j, reg_value); } /* clear after read */ - WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10); + reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS, + CLEAR_ERROR_STATUS, 0x1); + WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), reg_value); } } @@ -1134,7 +1771,7 @@ static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev) } } -void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev) +static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev) { if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return; @@ -1144,7 +1781,7 @@ void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev) gfx_v9_4_2_query_sq_timeout_status(adev); } -void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev) +static void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev) { if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) return; @@ -1154,7 +1791,7 @@ void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev) gfx_v9_4_2_reset_sq_timeout_status(adev); } -void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev) +static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev) { uint32_t i; uint32_t data; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h index 81c5833b6b9f..6db1f88509af 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h @@ -29,6 +29,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev, void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, uint32_t 
die_id); void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev); +int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev); extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 1e4678cb98f0..bda1542ef1dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -283,10 +283,14 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, block_size); - /* Send no-retry XNACK on fault to suppress VM fault storm. */ + /* Send no-retry XNACK on fault to suppress VM fault storm. + * On Aldebaran, XNACK can be enabled in the SQ per-process. + * Retry faults need to be enabled for that to work. + */ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !adev->gmc.noretry); + !adev->gmc.noretry || + adev->asic_type == CHIP_ALDEBARAN); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i * hub->ctx_distance, tmp); WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, @@ -317,18 +321,6 @@ static void gfxhub_v1_0_program_invalidation(struct amdgpu_device *adev) static int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) { - /* - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. */ gfxhub_v1_0_init_gart_aperture_regs(adev); gfxhub_v1_0_init_system_aperture_regs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index 41807817de7d..1a374ec0514a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -31,6 +31,9 @@ #include "soc15_common.h" +#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP 0x16f8 +#define mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP_BASE_IDX 0 + static const char *gfxhub_client_ids[] = { "CB/DB", "Reserved", @@ -531,6 +534,42 @@ static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev) return 0; } +static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev) +{ + int i; + u32 tmp = 0, disabled_sa = 0; + u32 efuse_setting, vbios_setting; + + u32 max_sa_mask = amdgpu_gfx_create_bitmask( + adev->gfx.config.max_sh_per_se * + adev->gfx.config.max_shader_engines); + + if (adev->asic_type == CHIP_YELLOW_CARP) { + /* Get SA disabled bitmap from eFuse setting */ + efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE); + efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK; + efuse_setting >>= CC_GC_SA_UNIT_DISABLE__SA_DISABLE__SHIFT; + + /* Get SA disabled bitmap from VBIOS setting */ + vbios_setting = RREG32_SOC15(GC, 0, mmGC_USER_SA_UNIT_DISABLE); + vbios_setting &= GC_USER_SA_UNIT_DISABLE__SA_DISABLE_MASK; + vbios_setting >>= GC_USER_SA_UNIT_DISABLE__SA_DISABLE__SHIFT; + + disabled_sa |= efuse_setting | vbios_setting; + /* Make sure not to report harvested SAs beyond the max SA count */ + disabled_sa &= max_sa_mask; + + for (i = 0; disabled_sa > 0; i++) { + if (disabled_sa & 1) + tmp |= 0x3 << (i * 2); + disabled_sa >>= 1; + } + disabled_sa = tmp; + + WREG32_SOC15(GC, 0, 
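/*
 * Worked example of the expansion loop above (illustrative only): a
 * combined eFuse/VBIOS harvest mask of 0b0101 (SA0 and SA2 disabled)
 * widens to 0b00110011, i.e. two bypass-group bits per disabled SA,
 * before being written to the Yellow Carp GCUTCL2 bypass-groups register.
 */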
mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa); + } +} + const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = { .get_fb_location = gfxhub_v2_1_get_fb_location, .get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset, @@ -540,4 +579,5 @@ const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = { .set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default, .init = gfxhub_v2_1_init, .get_xgmi_info = gfxhub_v2_1_get_xgmi_info, + .utcl2_harvest = gfxhub_v2_1_utcl2_harvest, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 498b28a35f5b..4523df2785d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -229,6 +229,10 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; + unsigned char hub_ip = 0; + + hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); /* @@ -242,8 +246,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acuqire */ - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, hub_ip); + if (tmp & 0x1) break; udelay(1); @@ -253,7 +258,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + + hub->eng_distance * eng, + inv_req, hub_ip); /* * Issue a dummy read to wait for the ACK register to be cleared @@ -261,12 +268,14 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, */ if ((vmhub == AMDGPU_GFXHUB_0) && (adev->asic_type < CHIP_SIENNA_CICHLID)) - RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng); + RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + + hub->eng_distance * eng, hub_ip); /* Wait for ACK with a delay.*/ for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack + + hub->eng_distance * eng, hub_ip); + tmp &= 1 << vmid; if (tmp) break; @@ -280,15 +289,15 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - WREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, 0); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0, hub_ip); spin_unlock(&adev->gmc.invalidate_lock); if (i < adev->usec_timeout) return; - DRM_ERROR("Timeout waiting for VM flush ACK!\n"); + DRM_ERROR("Timeout waiting for VM flush hub: %d!\n", vmhub); } /** @@ -666,6 +675,7 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: adev->mmhub.funcs = &mmhub_v2_3_funcs; break; default: @@ -681,6 +691,8 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->gfxhub.funcs = &gfxhub_v2_1_funcs; break; default: @@ -796,6 +808,8 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case 
CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: default: adev->gmc.gart_size = 512ULL << 20; break; @@ -863,6 +877,8 @@ static int gmc_v10_0_sw_init(void *handle) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->num_vmhubs = 2; /* * To fulfill 4-level page support, @@ -920,6 +936,7 @@ static int gmc_v10_0_sw_init(void *handle) return r; amdgpu_gmc_get_vbios_allocations(adev); + amdgpu_gmc_get_reserved_allocation(adev); /* Memory manager */ r = amdgpu_bo_init(adev); @@ -944,7 +961,7 @@ static int gmc_v10_0_sw_init(void *handle) } /** - * gmc_v8_0_gart_fini - vm fini callback + * gmc_v10_0_gart_fini - vm fini callback * * @adev: amdgpu_device pointer * @@ -953,7 +970,6 @@ static int gmc_v10_0_sw_init(void *handle) static void gmc_v10_0_gart_fini(struct amdgpu_device *adev) { amdgpu_gart_table_vram_free(adev); - amdgpu_gart_fini(adev); } static int gmc_v10_0_sw_fini(void *handle) @@ -978,6 +994,8 @@ static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: break; default: break; @@ -1041,6 +1059,13 @@ static int gmc_v10_0_hw_init(void *handle) /* The sequence of these two function calls matters.*/ gmc_v10_0_init_golden_registers(adev); + /* + * harvestable groups in gc_utcl2 need to be programmed before any GFX block + * register setup within GMC, or else system hang when harvesting SA. + */ + if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest) + adev->gfxhub.funcs->utcl2_harvest(adev); + r = gmc_v10_0_gart_enable(adev); if (r) return r; @@ -1133,7 +1158,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle, return r; if (adev->asic_type >= CHIP_SIENNA_CICHLID && - adev->asic_type <= CHIP_DIMGREY_CAVEFISH) + adev->asic_type <= CHIP_YELLOW_CARP) return athub_v2_1_set_clockgating(adev, state); else return athub_v2_0_set_clockgating(adev, state); @@ -1146,7 +1171,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags) adev->mmhub.funcs->get_clockgating(adev, flags); if (adev->asic_type >= CHIP_SIENNA_CICHLID && - adev->asic_type <= CHIP_DIMGREY_CAVEFISH) + adev->asic_type <= CHIP_YELLOW_CARP) athub_v2_1_get_clockgating(adev, flags); else athub_v2_0_get_clockgating(adev, flags); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 405d6ad09022..0e81e03e9b49 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -898,7 +898,6 @@ static int gmc_v6_0_sw_fini(void *handle) amdgpu_vm_manager_fini(adev); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 210ada2289ec..0a50fdaced7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -516,7 +516,7 @@ static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, } /** - * gmc_v8_0_set_fault_enable_default - update VM fault handling + * gmc_v7_0_set_fault_enable_default - update VM fault handling * * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page @@ -1085,7 +1085,6 @@ static int gmc_v7_0_sw_fini(void *handle) kfree(adev->gmc.vm_fault_info); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); 
release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index e4f27b3f28fb..492ebed2915b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1201,7 +1201,6 @@ static int gmc_v8_0_sw_fini(void *handle) kfree(adev->gmc.vm_fault_info); amdgpu_gart_table_vram_free(adev); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); release_firmware(adev->gmc.fw); adev->gmc.fw = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 455bb91060d0..7eb70d69f760 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -53,6 +53,7 @@ #include "mmhub_v1_7.h" #include "umc_v6_1.h" #include "umc_v6_0.h" +#include "hdp_v4_0.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" @@ -1210,6 +1211,11 @@ static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) adev->gfxhub.funcs = &gfxhub_v1_0_funcs; } +static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) +{ + adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs; +} + static int gmc_v9_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1230,6 +1236,7 @@ static int gmc_v9_0_early_init(void *handle) gmc_v9_0_set_mmhub_funcs(adev); gmc_v9_0_set_mmhub_ras_funcs(adev); gmc_v9_0_set_gfxhub_funcs(adev); + gmc_v9_0_set_hdp_ras_funcs(adev); adev->gmc.shared_aperture_start = 0x2000000000000000ULL; adev->gmc.shared_aperture_end = @@ -1255,15 +1262,21 @@ static int gmc_v9_0_late_init(void *handle) * writes, while disables HBM ECC for vega10. */ if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) { - if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) { + if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) { if (adev->df.funcs->enable_ecc_force_par_wr_rmw) adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false); } } - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->reset_ras_error_count) - adev->mmhub.ras_funcs->reset_ras_error_count(adev); + if (!amdgpu_persistent_edc_harvesting_supported(adev)) { + if (adev->mmhub.ras_funcs && + adev->mmhub.ras_funcs->reset_ras_error_count) + adev->mmhub.ras_funcs->reset_ras_error_count(adev); + + if (adev->hdp.ras_funcs && + adev->hdp.ras_funcs->reset_ras_error_count) + adev->hdp.ras_funcs->reset_ras_error_count(adev); + } r = amdgpu_gmc_ras_late_init(adev); if (r) @@ -1275,10 +1288,7 @@ static int gmc_v9_0_late_init(void *handle) static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc) { - u64 base = 0; - - if (!amdgpu_sriov_vf(adev)) - base = adev->mmhub.funcs->get_fb_location(adev); + u64 base = adev->mmhub.funcs->get_fb_location(adev); /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; @@ -1602,7 +1612,6 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_gart_table_vram_free(adev); amdgpu_bo_unref(&adev->gmc.pdb0_bo); amdgpu_bo_fini(adev); - amdgpu_gart_fini(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index edbd35d293eb..74b90cc2bf48 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -59,12 +59,31 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev, HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); } +static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data 
*err_data = (struct ras_err_data *)ras_error_status; + + err_data->ue_count = 0; + err_data->ce_count = 0; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) + return; + + /* HDP SRAM errors are uncorrectable ones (i.e. fatal errors) */ + err_data->ue_count += RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); +}; + static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev) { if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP)) return; - /*read back hdp ras counter to reset it to 0 */ - RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); + + if (adev->asic_type >= CHIP_ALDEBARAN) + WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0); + else + /*read back hdp ras counter to reset it to 0 */ + RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT); } static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev, @@ -130,10 +149,16 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); } +const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs = { + .ras_late_init = amdgpu_hdp_ras_late_init, + .ras_fini = amdgpu_hdp_ras_fini, + .query_ras_error_count = hdp_v4_0_query_ras_error_count, + .reset_ras_error_count = hdp_v4_0_reset_ras_error_count, +}; + const struct amdgpu_hdp_funcs hdp_v4_0_funcs = { .flush_hdp = hdp_v4_0_flush_hdp, .invalidate_hdp = hdp_v4_0_invalidate_hdp, - .reset_ras_error_count = hdp_v4_0_reset_ras_error_count, .update_clock_gating = hdp_v4_0_update_clock_gating, .get_clock_gating_state = hdp_v4_0_get_clockgating_state, .init_registers = hdp_v4_0_init_registers, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h index d1e6399e8c46..dc3a1b81dd62 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h @@ -27,5 +27,6 @@ #include "soc15_common.h" extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs; +extern const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index cc957471f31e..ddfe4eaeea05 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -300,8 +300,7 @@ static int iceland_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index bd77794315bc..01c242c5abc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -49,10 +49,13 @@ static int jpeg_v3_0_set_powergating_state(void *handle, static int jpeg_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) - return -ENOENT; + if (adev->asic_type != CHIP_YELLOW_CARP) { + u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); + + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + return -ENOENT; + } adev->jpeg.num_jpeg_inst = 1; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index 0103a5ab28e6..f80a14a1b82d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -111,6 +111,9 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, 
regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + if (amdgpu_sriov_vf(adev)) + return; + /* Program the system aperture low logical page number. */ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); @@ -129,8 +132,6 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev) WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF); WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0); } - if (amdgpu_sriov_vf(adev)) - return; /* Set default page address. */ value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr); @@ -296,10 +297,12 @@ static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, block_size); - /* Send no-retry XNACK on fault to suppress VM fault storm. */ + /* On Aldebaran, XNACK can be enabled in the SQ per-process. + * Retry faults need to be enabled for that to work. + */ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, - !adev->gmc.noretry); + 1); WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i * hub->ctx_distance, tmp); WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, @@ -330,18 +333,6 @@ static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev) static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev)) { - /* - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are - * VF copy registers so vbios post doesn't program them, for - * SRIOV driver need to program them - */ - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, - adev->gmc.vram_start >> 24); - WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, - adev->gmc.vram_end >> 24); - } - /* GART Enable. 
*/ mmhub_v1_7_init_gart_aperture_regs(adev); mmhub_v1_7_init_system_aperture_regs(adev); @@ -1313,12 +1304,31 @@ static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev) } } +static void mmhub_v1_7_reset_ras_error_status(struct amdgpu_device *adev) +{ + int i; + uint32_t reg_value; + + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) + return; + + for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ea_err_status_regs); i++) { + reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( + mmhub_v1_7_ea_err_status_regs[i])); + reg_value = REG_SET_FIELD(reg_value, MMEA0_ERR_STATUS, + CLEAR_ERROR_STATUS, 0x01); + WREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_ea_err_status_regs[i]), + reg_value); + } +} + const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = { .ras_late_init = amdgpu_mmhub_ras_late_init, .ras_fini = amdgpu_mmhub_ras_fini, .query_ras_error_count = mmhub_v1_7_query_ras_error_count, .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count, .query_ras_error_status = mmhub_v1_7_query_ras_error_status, + .reset_ras_error_status = mmhub_v1_7_reset_ras_error_status, }; const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index ac76081b91d5..f7e93bbc4e15 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -29,6 +29,7 @@ #include "mmhub/mmhub_2_0_0_default.h" #include "navi10_enum.h" +#include "gc/gc_10_1_0_offset.h" #include "soc15_common.h" #define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid 0x064d @@ -93,6 +94,30 @@ static const char *mmhub_client_ids_sienna_cichlid[][2] = { [15][1] = "OSS", }; +static const char *mmhub_client_ids_beige_goby[][2] = { + [3][0] = "DCEDMC", + [4][0] = "DCEVGA", + [5][0] = "MP0", + [6][0] = "MP1", + [8][0] = "VMC", + [9][0] = "VCNU0", + [11][0] = "VCN0", + [14][0] = "HDP", + [15][0] = "OSS", + [0][1] = "DBGU0", + [1][1] = "DBGU1", + [2][1] = "DCEDWB", + [3][1] = "DCEDMC", + [4][1] = "DCEVGA", + [5][1] = "MP0", + [6][1] = "MP1", + [7][1] = "XDP", + [9][1] = "VCNU0", + [11][1] = "VCN0", + [14][1] = "HDP", + [15][1] = "OSS", +}; + static uint32_t mmhub_v2_0_get_invalidate_req(unsigned int vmid, uint32_t flush_type) { @@ -139,6 +164,9 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev, case CHIP_DIMGREY_CAVEFISH: mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw]; break; + case CHIP_BEIGE_GOBY: + mmhub_cid = mmhub_client_ids_beige_goby[cid][rw]; + break; default: mmhub_cid = NULL; break; @@ -165,11 +193,11 @@ static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi { struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0]; - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base)); } @@ -180,14 +208,14 @@ static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, + WREG32_SOC15_RLC(MMHUB, 0, 
mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, (u32)(adev->gmc.gart_start >> 44)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, (u32)(adev->gmc.gart_end >> 12)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32, (u32)(adev->gmc.gart_end >> 44)); } @@ -196,12 +224,12 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev) uint64_t value; uint32_t tmp; - /* Program the AGP BAR */ - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); - WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - if (!amdgpu_sriov_vf(adev)) { + /* Program the AGP BAR */ + WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + /* Program the system aperture low logical page number. */ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); @@ -308,7 +336,7 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); + WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); } static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) @@ -370,16 +398,16 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, !adev->gmc.noretry); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL, i * hub->ctx_distance, tmp); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i * hub->ctx_addr_distance, 0); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32, i * hub->ctx_addr_distance, lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32, i * hub->ctx_addr_distance, upper_32_bits(adev->vm_manager.max_pfn - 1)); } @@ -391,9 +419,9 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev) unsigned i; for (i = 0; i < 18; ++i) { - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32, i * hub->eng_addr_distance, 0xffffffff); - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, + WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32, i * hub->eng_addr_distance, 0x1f); } } @@ -422,7 +450,7 @@ static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) /* Disable all tables */ for (i = 0; i < AMDGPU_NUM_VMID; i++) - WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, + 
WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, i * hub->ctx_distance, 0); /* Setup TLB control */ @@ -544,6 +572,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); break; @@ -578,6 +607,7 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: if (def != data) WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); if (def1 != data1) @@ -601,6 +631,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); break; default: @@ -618,6 +649,7 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); break; default: @@ -640,6 +672,7 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: mmhub_v2_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); mmhub_v2_0_update_medium_grain_light_sleep(adev, @@ -663,6 +696,7 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index a9899335d0b1..88e457a150e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -92,6 +92,7 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev, status); switch (adev->asic_type) { case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: mmhub_cid = mmhub_client_ids_vangogh[cid][rw]; break; default: @@ -569,9 +570,9 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev, return 0; mmhub_v2_3_update_medium_grain_clock_gating(adev, - state == AMD_CG_STATE_GATE ? true : false); + state == AMD_CG_STATE_GATE); mmhub_v2_3_update_medium_grain_light_sleep(adev, - state == AMD_CG_STATE_GATE ? 
true : false); + state == AMD_CG_STATE_GATE); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 47c8dd9d1c78..c4ef822bbe8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -436,7 +436,7 @@ static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev) } /** - * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling + * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling * * @adev: amdgpu_device pointer * @value: true redirects VM faults to the default page diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index f4e4040bbd25..530011622801 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -120,11 +120,23 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev, ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1, RB_USED_INT_THRESHOLD, threshold); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1, ih_rb_cntl)) + return; + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl); + } + ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2); ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2, RB_USED_INT_THRESHOLD, threshold); - WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2, ih_rb_cntl)) + return; + } else { + WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl); + } + WREG32_SOC15(OSSSYS, 0, mmIH_CNTL2, ih_cntl); } @@ -151,7 +163,13 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
1 : 0)); - WREG32(ih_regs->ih_rb_cntl, tmp); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } if (enable) { ih->enabled = true; @@ -261,7 +279,15 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); } - WREG32(ih_regs->ih_rb_cntl, tmp); + + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { + DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); + return -ETIMEDOUT; + } + } else { + WREG32(ih_regs->ih_rb_cntl, tmp); + } if (ih == &adev->irq.ih) { /* set the ih ring 0 writeback address whether it's enabled or not */ @@ -311,6 +337,8 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid); ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); @@ -569,11 +597,7 @@ static int navi10_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft); - amdgpu_ih_ring_fini(adev, &adev->irq.ih2); - amdgpu_ih_ring_fini(adev, &adev->irq.ih1); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 598ce0e93627..8f2a315e7c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -28,6 +28,25 @@ #include "nbio/nbio_7_2_0_sh_mask.h" #include <uapi/linux/kfd_ioctl.h> +#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC 0x0015 +#define regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC_BASE_IDX 2 +#define regBIF_BX0_BIF_FB_EN_YC 0x0100 +#define regBIF_BX0_BIF_FB_EN_YC_BASE_IDX 2 +#define regBIF1_PCIE_MST_CTRL_3 0x4601c6 +#define regBIF1_PCIE_MST_CTRL_3_BASE_IDX 5 +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT \ + 0x1b +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT \ + 0x1c +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK \ + 0x08000000L +#define BIF1_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK \ + 0x30000000L +#define regBIF1_PCIE_TX_POWER_CTRL_1 0x460187 +#define regBIF1_PCIE_TX_POWER_CTRL_1_BASE_IDX 5 +#define BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L +#define BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L + static void nbio_v7_2_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, @@ -38,7 +57,12 @@ static void nbio_v7_2_remap_hdp_registers(struct amdgpu_device *adev) static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev) { - u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); + u32 tmp; + + if (adev->asic_type == CHIP_YELLOW_CARP) + tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC); + else + tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -49,11 +73,19 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev) static 
void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable) { if (enable) - WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, - BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | - BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); + if (adev->asic_type == CHIP_YELLOW_CARP) + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, + BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | + BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); + else + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, + BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | + BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); else - WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0); + if (adev->asic_type == CHIP_YELLOW_CARP) + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0); + else + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0); } static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev) @@ -92,13 +124,13 @@ static void nbio_v7_2_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, - GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET, - doorbell_index); + GDC0_BIF_VCN0_DOORBELL_RANGE, OFFSET, + doorbell_index); doorbell_range = REG_SET_FIELD(doorbell_range, - GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8); + GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 8); } else { doorbell_range = REG_SET_FIELD(doorbell_range, - GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0); + GDC0_BIF_VCN0_DOORBELL_RANGE, SIZE, 0); } WREG32_PCIE_PORT(reg, doorbell_range); @@ -123,22 +155,22 @@ static void nbio_v7_2_enable_doorbell_selfring_aperture(struct amdgpu_device *ad if (enable) { tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, - DOORBELL_SELFRING_GPA_APER_EN, 1) | - REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, - DOORBELL_SELFRING_GPA_APER_MODE, 1) | - REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, - DOORBELL_SELFRING_GPA_APER_SIZE, 0); + DOORBELL_SELFRING_GPA_APER_EN, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_MODE, 1) | + REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, + DOORBELL_SELFRING_GPA_APER_SIZE, 0); WREG32_SOC15(NBIO, 0, - regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW, - lower_32_bits(adev->doorbell.base)); + regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW, + lower_32_bits(adev->doorbell.base)); WREG32_SOC15(NBIO, 0, - regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH, - upper_32_bits(adev->doorbell.base)); + regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH, + upper_32_bits(adev->doorbell.base)); } WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, - tmp); + tmp); } @@ -218,19 +250,42 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev { uint32_t def, data; - def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { - data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK | - PCIE_CNTL2__MST_MEM_LS_EN_MASK | - PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + if (adev->asic_type == CHIP_YELLOW_CARP) { + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK; + else + data &= ~PCIE_CNTL2__SLV_MEM_LS_EN_MASK; + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data); + + data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1)); + def = data; + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + data |= (BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK | + 
BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK); + else + data &= ~(BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK | + BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1), + data); } else { - data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | - PCIE_CNTL2__MST_MEM_LS_EN_MASK | - PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) + data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + else + data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK | + PCIE_CNTL2__MST_MEM_LS_EN_MASK | + PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data); } - - if (def != data) - WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data); } static void nbio_v7_2_get_clockgating_state(struct amdgpu_device *adev, @@ -297,14 +352,25 @@ const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = { static void nbio_v7_2_init_registers(struct amdgpu_device *adev) { uint32_t def, data; - - def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL)); - data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); - data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); - - if (def != data) - WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), - data); + if (adev->asic_type == CHIP_YELLOW_CARP) { + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3)); + data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3, + CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3, + CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data); + } else { + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL)); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, + CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); + data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, + CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1); + + if (def != data) + WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); + } } const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index d290ca0b06da..455d0425787c 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -218,14 +218,118 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode = .codec_array = sc_video_codecs_decode_array, }; +/* SRIOV Sienna Cichlid, not const since data is controlled by host */ +static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 4096, + .max_height = 2304, + .max_pixels_per_frame = 4096 * 2304, + .max_level = 0, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, + .max_width = 4096, + .max_height = 2304, + .max_pixels_per_frame = 4096 * 2304, + .max_level = 0, + }, +}; + +static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] = +{ + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 3, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, + .max_width = 4096, + 
.max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 5, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 52, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 4, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, + .max_width = 8192, + .max_height = 4352, + .max_pixels_per_frame = 8192 * 4352, + .max_level = 186, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, + .max_width = 4096, + .max_height = 4096, + .max_pixels_per_frame = 4096 * 4096, + .max_level = 0, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, + .max_width = 8192, + .max_height = 4352, + .max_pixels_per_frame = 8192 * 4352, + .max_level = 0, + }, + { + .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, + .max_width = 8192, + .max_height = 4352, + .max_pixels_per_frame = 8192 * 4352, + .max_level = 0, + }, +}; + +static struct amdgpu_video_codecs sriov_sc_video_codecs_encode = +{ + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + .codec_array = sriov_sc_video_codecs_encode_array, +}; + +static struct amdgpu_video_codecs sriov_sc_video_codecs_decode = +{ + .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array), + .codec_array = sriov_sc_video_codecs_decode_array, +}; + static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { switch (adev->asic_type) { case CHIP_SIENNA_CICHLID: + if (amdgpu_sriov_vf(adev)) { + if (encode) + *codecs = &sriov_sc_video_codecs_encode; + else + *codecs = &sriov_sc_video_codecs_decode; + } else { + if (encode) + *codecs = &nv_video_codecs_encode; + else + *codecs = &sc_video_codecs_decode; + } + return 0; case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: if (encode) *codecs = &nv_video_codecs_encode; else @@ -363,7 +467,7 @@ void nv_grbm_select(struct amdgpu_device *adev, grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid); grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl); + WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl); } static void nv_vga_set_state(struct amdgpu_device *adev, bool state) @@ -530,10 +634,12 @@ nv_asic_reset_method(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VANGOGH: + case CHIP_YELLOW_CARP: return AMD_RESET_METHOD_MODE2; case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: return AMD_RESET_METHOD_MODE1; default: if (amdgpu_dpm_is_baco_supported(adev)) @@ -598,7 +704,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev) static void nv_program_aspm(struct amdgpu_device *adev) { - if (amdgpu_aspm != 1) + if (!amdgpu_aspm) return; if (!(adev->flags & AMD_IS_APU) && @@ -675,6 +781,12 @@ legacy_init: case CHIP_DIMGREY_CAVEFISH: dimgrey_cavefish_reg_base_init(adev); break; + case CHIP_BEIGE_GOBY: + beige_goby_reg_base_init(adev); + break; + case CHIP_YELLOW_CARP: + yellow_carp_reg_base_init(adev); + break; default: return -EINVAL; } @@ -742,8 +854,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) case CHIP_NAVI12: amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, 
&navi10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + if (!amdgpu_sriov_vf(adev)) { + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + } else { + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + } if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) @@ -764,9 +881,15 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) case CHIP_SIENNA_CICHLID: amdgpu_device_ip_block_add(adev, &nv_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); - if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) - amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + if (!amdgpu_sriov_vf(adev)) { + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + } else { + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + } if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && is_support_sw_smu(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); @@ -845,6 +968,48 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); break; + case CHIP_BEIGE_GOBY: + amdgpu_device_ip_block_add(adev, &nv_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && + is_support_sw_smu(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); +#if defined(CONFIG_DRM_AMD_DC) + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); +#endif + if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && + is_support_sw_smu(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); + break; + case CHIP_YELLOW_CARP: + amdgpu_device_ip_block_add(adev, &nv_common_ip_block); + amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); + amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); +#if defined(CONFIG_DRM_AMD_DC) + else if (amdgpu_device_has_dc_support(adev)) + amdgpu_device_ip_block_add(adev, &dm_ip_block); +#endif + amdgpu_device_ip_block_add(adev, 
&vcn_v3_0_ip_block); + amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); + break; default: return -EINVAL; } @@ -1068,6 +1233,7 @@ static int nv_common_early_init(void *handle) case CHIP_SIENNA_CICHLID: adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_VCN_MGCG | @@ -1091,6 +1257,7 @@ static int nv_common_early_init(void *handle) case CHIP_NAVY_FLOUNDER: adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG | @@ -1121,6 +1288,8 @@ static int nv_common_early_init(void *handle) AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_JPEG_MGCG; adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_VCN | @@ -1132,6 +1301,7 @@ static int nv_common_early_init(void *handle) case CHIP_DIMGREY_CAVEFISH: adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG | @@ -1147,6 +1317,49 @@ static int nv_common_early_init(void *handle) AMD_PG_SUPPORT_MMHUB; adev->external_rev_id = adev->rev_id + 0x3c; break; + case CHIP_BEIGE_GOBY: + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG; + adev->pg_flags = AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_ATHUB | + AMD_PG_SUPPORT_MMHUB; + adev->external_rev_id = adev->rev_id + 0x46; + break; + case CHIP_YELLOW_CARP: + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_FGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ATHUB_MGCG | + AMD_CG_SUPPORT_ATHUB_LS | + AMD_CG_SUPPORT_IH_CG | + AMD_CG_SUPPORT_VCN_MGCG | + AMD_CG_SUPPORT_JPEG_MGCG; + adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG | + AMD_PG_SUPPORT_JPEG; + adev->external_rev_id = adev->rev_id + 0x01; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -1169,8 +1382,12 @@ static int nv_common_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); + amdgpu_virt_update_sriov_video_codec(adev, + sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array), + sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array)); + } return 0; } @@ -1266,6 +1483,7 @@ static int nv_common_set_clockgating_state(void *handle, case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); adev->nbio.funcs->update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h index 515d67bf249f..eb9aff1e7253 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/nv.h +++ b/drivers/gpu/drm/amd/amdgpu/nv.h @@ -36,4 +36,7 @@ int navi12_reg_base_init(struct amdgpu_device *adev); int sienna_cichlid_reg_base_init(struct amdgpu_device *adev); void vangogh_reg_base_init(struct amdgpu_device *adev); int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev); +int beige_goby_reg_base_init(struct amdgpu_device *adev); +int yellow_carp_reg_base_init(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 96064c343163..dd0dce254901 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -97,7 +97,6 @@ enum psp_gfx_cmd_id GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */ GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */ GFX_CMD_ID_PROG_REG = 0x0000000B, /* program regs */ - GFX_CMD_ID_CLEAR_VF_FW = 0x0000000D, /* Clear VF FW, to be used on VF shutdown. */ GFX_CMD_ID_GET_FW_ATTESTATION = 0x0000000F, /* Query GPUVA of the Fw Attestation DB */ /* IDs upto 0x1F are reserved for older programs (Raven, Vega 10/12/20) */ GFX_CMD_ID_LOAD_TOC = 0x00000020, /* Load TOC and obtain TMR size */ @@ -333,11 +332,16 @@ struct psp_gfx_uresp_fwar_db_info uint32_t fwar_db_addr_hi; }; +/* Command-specific response for boot config. */ +struct psp_gfx_uresp_bootcfg { + uint32_t boot_cfg; /* boot config data */ +}; + /* Union of command-specific responses for GPCOM ring. */ -union psp_gfx_uresp -{ - struct psp_gfx_uresp_reserved reserved; - struct psp_gfx_uresp_fwar_db_info fwar_db_info; +union psp_gfx_uresp { + struct psp_gfx_uresp_reserved reserved; + struct psp_gfx_uresp_bootcfg boot_cfg; + struct psp_gfx_uresp_fwar_db_info fwar_db_info; }; /* Structure of GFX Response buffer. 
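The new psp_gfx_uresp_bootcfg member above gives GPCOM callers a typed view of the returned boot-configuration word instead of a raw offset into the response buffer. A minimal reader sketch, assuming the response structure exposes the union as a member named uresp (struct psp_gfx_resp itself is outside this hunk):

static uint32_t psp_get_boot_cfg(const struct psp_gfx_resp *resp)
{
	/* Only meaningful when the issued command returns boot-config data. */
	return resp->uresp.boot_cfg.boot_cfg;
}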
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 02bba1f3c42e..bc133db2d538 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -23,6 +23,7 @@ #include <linux/firmware.h> #include <linux/module.h> #include <linux/vmalloc.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_psp.h" @@ -63,6 +64,8 @@ MODULE_FIRMWARE("amdgpu/vangogh_asd.bin"); MODULE_FIRMWARE("amdgpu/vangogh_toc.bin"); MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sos.bin"); MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_ta.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_sos.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -115,6 +118,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_DIMGREY_CAVEFISH: chip_name = "dimgrey_cavefish"; break; + case CHIP_BEIGE_GOBY: + chip_name = "beige_goby"; + break; default: BUG(); } @@ -200,6 +206,14 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) if (err) return err; break; + case CHIP_BEIGE_GOBY: + err = psp_init_sos_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_ta_microcode(psp, chip_name); + if (err) + return err; + break; case CHIP_VANGOGH: err = psp_init_asd_microcode(psp, chip_name); if (err) @@ -269,10 +283,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP KDB binary to memory */ - memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size); + psp_copy_fw(psp, psp->kdb_start_addr, psp->kdb_bin_size); /* Provide the PSP KDB to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -302,10 +314,8 @@ static int psp_v11_0_bootloader_load_spl(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP SPL binary to memory */ - memcpy(psp->fw_pri_buf, psp->spl_start_addr, psp->spl_bin_size); + psp_copy_fw(psp, psp->spl_start_addr, psp->spl_bin_size); /* Provide the PSP SPL to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -335,10 +345,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP System Driver binary to memory */ - memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); + psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size); /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -371,10 +379,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy Secure OS binary to PSP memory */ - memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); + psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size); /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -455,6 +461,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; if (amdgpu_sriov_vf(adev)) { + ring->ring_wptr = 0; ret = psp_v11_0_ring_stop(psp, ring_type); if (ret) { DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n"); @@ -608,7 +615,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) uint32_t p2c_header[4]; uint32_t sz; void *buf; - int ret; + int ret, idx; if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { DRM_DEBUG("Memory training is not supported.\n"); @@ -681,17 +688,24 @@ static int 
psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) return -ENOMEM; } - memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); - ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); - if (ret) { - DRM_ERROR("Send long training msg failed.\n"); + if (drm_dev_enter(&adev->ddev, &idx)) { + memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); + ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); + if (ret) { + DRM_ERROR("Send long training msg failed.\n"); + vfree(buf); + drm_dev_exit(idx); + return ret; + } + + memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); + adev->hdp.funcs->flush_hdp(adev, NULL); vfree(buf); - return ret; + drm_dev_exit(idx); + } else { + vfree(buf); + return -ENODEV; } - - memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); - adev->hdp.funcs->flush_hdp(adev, NULL); - vfree(buf); } if (ops & PSP_MEM_TRAIN_SAVE) { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index c4828bd3264b..618e5b6b85d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -138,10 +138,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP System Driver binary to memory */ - memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); + psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size); /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -179,10 +177,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy Secure OS binary to PSP memory */ - memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); + psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size); /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index fcdce46445d6..c8949add88f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -31,6 +31,9 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin"); MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_asd.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_toc.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_ta.bin"); static int psp_v13_0_init_microcode(struct psp_context *psp) { @@ -42,17 +45,37 @@ static int psp_v13_0_init_microcode(struct psp_context *psp) case CHIP_ALDEBARAN: chip_name = "aldebaran"; break; + case CHIP_YELLOW_CARP: + chip_name = "yellow_carp"; + break; + default: + BUG(); + } + switch (adev->asic_type) { + case CHIP_ALDEBARAN: + err = psp_init_sos_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_ta_microcode(&adev->psp, chip_name); + if (err) + return err; + break; + case CHIP_YELLOW_CARP: + err = psp_init_asd_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_toc_microcode(psp, chip_name); + if (err) + return err; + err = psp_init_ta_microcode(psp, chip_name); + if (err) + return err; + break; default: BUG(); } - err = psp_init_sos_microcode(psp, chip_name); - if (err) - return err; - - err = psp_init_ta_microcode(&adev->psp, chip_name); - - return err; + return 0; } static bool psp_v13_0_is_sos_alive(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 908664a5774b..b86dcbabb635 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -102,10 +102,8 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy PSP System Driver binary to memory */ - memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size); + psp_copy_fw(psp, psp->sys_start_addr, psp->sys_bin_size); /* Provide the sys driver to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -143,10 +141,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) if (ret) return ret; - memset(psp->fw_pri_buf, 0, PSP_1_MEG); - /* Copy Secure OS binary to PSP memory */ - memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size); + psp_copy_fw(psp, psp->sos_start_addr, psp->sos_bin_size); /* Provide the PSP secure OS to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, @@ -231,6 +227,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp, psp_v3_1_reroute_ih(psp); if (amdgpu_sriov_vf(adev)) { + ring->ring_wptr = 0; ret = psp_v3_1_ring_stop(psp, ring_type); if (ret) { DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 9f0dda040ec8..4509bd4cce2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -271,7 +271,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, } /** - * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring + * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring * * @ring: amdgpu ring pointer * diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 5715be6770ec..ae5464e2535a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -754,7 +754,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring) } /** - * sdma_v4_0_page_ring_set_wptr - commit the write pointer + * sdma_v4_0_ring_set_wptr - commit the write pointer * * @ring: amdgpu ring pointer * @@ -820,7 +820,7 @@ static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring) } /** - * sdma_v4_0_ring_set_wptr - commit the write pointer + * sdma_v4_0_page_ring_set_wptr - commit the write pointer * * @ring: amdgpu ring pointer * @@ -1109,6 +1109,8 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) if (adev->asic_type == CHIP_ARCTURUS && adev->sdma.instance[i].fw_version >= 14) WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable); + /* Extend page fault timeout to avoid interrupt storm */ + WREG32_SDMA(i, mmSDMA0_UTCL1_TIMEOUT, 0x00800080); } } @@ -2227,7 +2229,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev, memset(&task_info, 0, sizeof(struct amdgpu_task_info)); amdgpu_vm_get_task_info(adev, entry->pasid, &task_info); - dev_info(adev->dev, + dev_dbg_ratelimited(adev->dev, "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u " "pasid:%u, for process %s pid %d thread %s pid %d\n", instance, addr, entry->src_id, entry->ring_id, entry->vmid, @@ -2240,7 +2242,7 @@ static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - dev_err(adev->dev, "MC or SEM address in VM hole\n"); + dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n"); sdma_v4_0_print_iv_entry(adev, entry); return 0; } @@ -2249,7 +2251,7 @@ static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev, 
struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - dev_err(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n"); + dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n"); sdma_v4_0_print_iv_entry(adev, entry); return 0; } @@ -2258,7 +2260,7 @@ static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - dev_err(adev->dev, + dev_dbg_ratelimited(adev->dev, "Polling register/memory timeout executing POLL_REG/MEM with finite timer\n"); sdma_v4_0_print_iv_entry(adev, entry); return 0; @@ -2268,7 +2270,7 @@ static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - dev_err(adev->dev, + dev_dbg_ratelimited(adev->dev, "SDMA gets an Register Write SRBM_WRITE command in non-privilege command buffer\n"); sdma_v4_0_print_iv_entry(adev, entry); return 0; @@ -2597,27 +2599,18 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs = { static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev) { + adev->sdma.trap_irq.num_types = adev->sdma.num_instances; + adev->sdma.ecc_irq.num_types = adev->sdma.num_instances; + /*For Arcturus and Aldebaran, add another 4 irq handler*/ switch (adev->sdma.num_instances) { - case 1: - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1; - break; case 5: - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5; - break; case 8: - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST; - adev->sdma.vm_hole_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5; - adev->sdma.doorbell_invalid_irq.num_types = AMDGPU_SDMA_IRQ_LAST; - adev->sdma.pool_timeout_irq.num_types = AMDGPU_SDMA_IRQ_LAST; - adev->sdma.srbm_write_irq.num_types = AMDGPU_SDMA_IRQ_LAST; + adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances; + adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances; + adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances; + adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances; break; - case 2: default: - adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; - adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE2; break; } adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 8859133ce37e..6117ba8a4c3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -328,9 +328,9 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring) wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs])); DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr); } else { - wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); + wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)); wptr = wptr << 32; - wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); + wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)); DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr); } @@ -371,9 +371,9 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring) lower_32_bits(ring->wptr << 2), ring->me, upper_32_bits(ring->wptr << 2)); - 
WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2)); - WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2)); } } @@ -409,18 +409,6 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vmid = AMDGPU_JOB_GET_VMID(job); uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); - /* Invalidate L2, because if we don't do it, we might get stale cache - * lines from previous IBs. - */ - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV | - SDMA_GCR_GL2_WB | - SDMA_GCR_GLM_INV | - SDMA_GCR_GLM_WB) << 16); - amdgpu_ring_write(ring, 0xffffff80); - amdgpu_ring_write(ring, 0xffff); - /* An IB packet must end on a 8 DW boundary--the next dword * must be on a 8-dword boundary. Our IB packet below is 6 * dwords long, thus add x number of NOPs, such that, in @@ -442,6 +430,32 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring, } /** + * sdma_v5_0_ring_emit_mem_sync - flush the IB by graphics cache rinse + * + * @ring: amdgpu ring pointer + * @job: job to retrieve vmid from + * @ib: IB object to schedule + * + * flush the IB by graphics cache rinse. + */ +static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring) +{ + uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV | + SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV | + SDMA_GCR_GLI_INV(1); + + /* flush entire cache L0/L1/L2, this can be optimized by performance requirement */ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) | + SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) | + SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16)); + amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) | + SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0)); +} + +/** * sdma_v5_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring * * @ring: amdgpu ring pointer @@ -534,12 +548,12 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev) amdgpu_ttm_set_buffer_funcs_status(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { - rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); } } @@ -556,7 +570,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev) } /** - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch + * sdma_v5_0_ctx_switch_enable - 
stop the async dma engines context switch * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. @@ -600,11 +614,11 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) } if (enable && amdgpu_sdma_phase_quantum) { - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), phase_quantum); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), phase_quantum); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), phase_quantum); } if (!amdgpu_sriov_vf(adev)) @@ -671,58 +685,63 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); /* Initialize the ring buffer's read and write pointers */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); /* setup the wptr shadow polling */ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), lower_32_bits(wptr_gpu_addr)); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); /* set the wb address whether it's enabled or not */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + WREG32_SOC15_IP(GC, 
sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), + ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), + ring->gpu_addr >> 40); ring->wptr = 0; /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), + lower_32_bits(ring->wptr) << 2); + WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), + upper_32_bits(ring->wptr) << 2); } - doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, + mmSDMA0_GFX_DOORBELL_OFFSET)); if (ring->use_doorbell) { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); @@ -731,8 +750,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) } else { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); } - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), + doorbell_offset); adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, 20); @@ -741,7 +761,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) sdma_v5_0_ring_set_wptr(ring); /* set minor_ptr_update to 0 after wptr programed */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); if (!amdgpu_sriov_vf(adev)) { /* set utc l1 enable flag always to 1 */ @@ -775,15 +795,15 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif /* enable DMA IBs */ - WREG32(sdma_v5_0_get_reg_offset(adev, i, 
mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); ring->sched.ready = true; @@ -1647,6 +1667,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = { 10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */ .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */ .emit_ib = sdma_v5_0_ring_emit_ib, + .emit_mem_sync = sdma_v5_0_ring_emit_mem_sync, .emit_fence = sdma_v5_0_ring_emit_fence, .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync, .emit_vm_flush = sdma_v5_0_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 240596b25fe4..7486e5306786 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -47,8 +47,10 @@ MODULE_FIRMWARE("amdgpu/sienna_cichlid_sdma.bin"); MODULE_FIRMWARE("amdgpu/navy_flounder_sdma.bin"); MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin"); +MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin"); MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin"); +MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin"); #define SDMA1_REG_OFFSET 0x600 #define SDMA3_REG_OFFSET 0x400 @@ -92,6 +94,8 @@ static void sdma_v5_2_init_golden_registers(struct amdgpu_device *adev) case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: break; default: break; @@ -145,9 +149,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) struct amdgpu_firmware_info *info = NULL; const struct common_firmware_header *header = NULL; - if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID)) - return 0; - DRM_DEBUG("\n"); switch (adev->asic_type) { @@ -163,6 +164,12 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) case CHIP_DIMGREY_CAVEFISH: chip_name = "dimgrey_cavefish"; break; + case CHIP_BEIGE_GOBY: + chip_name = "beige_goby"; + break; + case CHIP_YELLOW_CARP: + chip_name = "yellow_carp"; + break; default: BUG(); } @@ -182,6 +189,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev) (void *)&adev->sdma.instance[0], sizeof(struct amdgpu_sdma_instance)); + if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID)) + return 0; + DRM_DEBUG("psp_load == '%s'\n", adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? 
"true" : "false"); @@ -490,12 +500,12 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev) amdgpu_ttm_set_buffer_funcs_status(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { - rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); } } @@ -512,7 +522,7 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev) } /** - * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch + * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch * * @adev: amdgpu_device pointer * @enable: enable/disable the DMA MEs context switch. @@ -553,11 +563,11 @@ static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, AUTO_CTXSW_ENABLE, enable ? 1 : 0); if (enable && amdgpu_sdma_phase_quantum) { - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), phase_quantum); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), phase_quantum); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), phase_quantum); } WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); @@ -615,62 +625,62 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) ring = &adev->sdma.instance[i].ring; wb_offset = (ring->rptr_offs * 4); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0); /* Set ring buffer size in dwords */ rb_bufsz = order_base_2(ring->ring_size / 4); - rb_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); + rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL)); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); #ifdef __BIG_ENDIAN rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_SWAP_ENABLE, 1); #endif - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); /* Initialize the ring buffer's read and write pointers */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0); + WREG32_SOC15_IP(GC, 
sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0); /* setup the wptr shadow polling */ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), lower_32_bits(wptr_gpu_addr)); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), upper_32_bits(wptr_gpu_addr)); - wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, + wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); /* set the wb address whether it's enabled or not */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI), upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO), lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40); ring->wptr = 0; /* before programing wptr to a less value, need set minor_ptr_update first */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1); if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */ WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2); WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2); } - doorbell = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); - doorbell_offset = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); + doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL)); + doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET)); if (ring->use_doorbell) { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); @@ -679,8 +689,8 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) } else { doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); } - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); + 
WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, @@ -690,7 +700,7 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) sdma_v5_2_ring_set_wptr(ring); /* set minor_ptr_update to 0 after wptr programed */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0); /* set utc l1 enable flag always to 1 */ temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); @@ -701,19 +711,19 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp); /* Set up RESP_MODE to non-copy addresses */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL)); temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp); /* program default cache read and write policy */ - temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); + temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE)); /* clean read policy and write policy bits */ temp &= 0xFF0FFF; temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14) | SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp); if (!amdgpu_sriov_vf(adev)) { /* unhalt engine */ @@ -724,15 +734,15 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) /* enable DMA RB */ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl); - ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); + ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL)); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); #ifdef __BIG_ENDIAN ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); #endif /* enable DMA IBs */ - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl); ring->sched.ready = true; @@ -1223,6 +1233,8 @@ static int sdma_v5_2_early_init(void *handle) adev->sdma.num_instances = 2; break; case CHIP_VANGOGH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: adev->sdma.num_instances = 1; break; default: @@ -1551,6 +1563,10 @@ static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *ade int i; for (i = 0; i < adev->sdma.num_instances; i++) { + + if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH) + adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG; + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { /* Enable sdma clock gating */ def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL)); @@ -1584,6 +1600,10 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev int i; for (i = 0; i < 
adev->sdma.num_instances; i++) { + + if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH) + adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS; + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { /* Enable sdma mem light sleep */ def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL)); @@ -1615,6 +1635,8 @@ static int sdma_v5_2_set_clockgating_state(void *handle, case CHIP_NAVY_FLOUNDER: case CHIP_VANGOGH: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: + case CHIP_YELLOW_CARP: sdma_v5_2_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); sdma_v5_2_update_medium_grain_light_sleep(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index cb703e307238..195b45bcb8ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -305,7 +305,7 @@ err0: } /** - * cik_dma_vm_copy_pte - update PTEs by copying them from the GART + * si_dma_vm_copy_pte - update PTEs by copying them from the GART * * @ib: indirect buffer to fill with commands * @pe: addr of the page entry @@ -402,7 +402,7 @@ static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib, } /** - * si_dma_pad_ib - pad the IB to the required number of dw + * si_dma_ring_pad_ib - pad the IB to the required number of dw * * @ring: amdgpu_ring pointer * @ib: indirect buffer to fill with padding @@ -415,7 +415,7 @@ static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) } /** - * cik_sdma_ring_emit_pipeline_sync - sync the pipeline + * si_dma_ring_emit_pipeline_sync - sync the pipeline * * @ring: amdgpu_ring pointer * diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 51880f6ef634..9a24f17a5750 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -175,8 +175,7 @@ static int si_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c index 079b094c48ad..39b7c206770f 100644 --- a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c @@ -89,7 +89,24 @@ static u32 smuio_v13_0_get_die_id(struct amdgpu_device *adev) } /** - * smuio_v13_0_supports_host_gpu_xgmi - detect xgmi interface between cpu and gpu/s. + * smuio_v13_0_get_socket_id - query socket id from FCH + * + * @adev: amdgpu device pointer + * + * Returns socket id + */ +static u32 smuio_v13_0_get_socket_id(struct amdgpu_device *adev) +{ + u32 data, socket_id; + + data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG); + socket_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, SOCKET_ID); + + return socket_id; +} + +/** + * smuio_v13_0_is_host_gpu_xgmi_supported - detect xgmi interface between cpu and gpu/s. 
* * @adev: amdgpu device pointer * @@ -115,6 +132,7 @@ const struct amdgpu_smuio_funcs smuio_v13_0_funcs = { .get_rom_index_offset = smuio_v13_0_get_rom_index_offset, .get_rom_data_offset = smuio_v13_0_get_rom_data_offset, .get_die_id = smuio_v13_0_get_die_id, + .get_socket_id = smuio_v13_0_get_socket_id, .is_host_gpu_xgmi_supported = smuio_v13_0_is_host_gpu_xgmi_supported, .update_rom_clock_gating = smuio_v13_0_update_rom_clock_gating, .get_clock_gating_state = smuio_v13_0_get_clock_gating_state, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index e65c286f93a6..de85577c9cfd 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -633,7 +633,9 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, if (entry->and_mask == 0xffffffff) { tmp = entry->or_mask; } else { - tmp = RREG32(reg); + tmp = (entry->hwip == GC_HWIP) ? + RREG32_SOC15_IP(GC, reg) : RREG32(reg); + tmp &= ~(entry->and_mask); tmp |= (entry->or_mask & entry->and_mask); } @@ -644,7 +646,8 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)) WREG32_RLC(reg, tmp); else - WREG32(reg, tmp); + (entry->hwip == GC_HWIP) ? + WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp); } @@ -656,7 +659,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev) int ret = 0; /* avoid NBIF got stuck when do RAS recovery in BACO reset */ - if (ras && ras->supported) + if (ras && adev->ras_enabled) adev->nbio.funcs->enable_doorbell_interrupt(adev, false); ret = amdgpu_dpm_baco_reset(adev); @@ -664,7 +667,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev) return ret; /* re-enable doorbell interrupt after BACO exit */ - if (ras && ras->supported) + if (ras && adev->ras_enabled) adev->nbio.funcs->enable_doorbell_interrupt(adev, true); return 0; @@ -711,7 +714,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) * 1. PMFW version > 0x284300: all cases use baco * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco */ - if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400) + if (ras && adev->ras_enabled && + adev->pm.fw_version <= 0x283400) baco_reset = false; break; case CHIP_ALDEBARAN: @@ -817,7 +821,7 @@ static void soc15_pcie_gen3_enable(struct amdgpu_device *adev) static void soc15_program_aspm(struct amdgpu_device *adev) { - if (amdgpu_aspm != 1) + if (!amdgpu_aspm) return; if (!(adev->flags & AMD_IS_APU) && @@ -1521,9 +1525,6 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); - if (adev->hdp.funcs->reset_ras_error_count) - adev->hdp.funcs->reset_ras_error_count(adev); - if (adev->nbio.ras_funcs && adev->nbio.ras_funcs->ras_late_init) r = adev->nbio.ras_funcs->ras_late_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 14bd794bbea6..0eeb5e073be8 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -27,28 +27,51 @@ /* Register Access Macros */ #define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) +#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \ + ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_wreg) ? 
\ + adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, flag, hwip) : \ + WREG32(reg, value)) + +#define __RREG32_SOC15_RLC__(reg, flag, hwip) \ + ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->rlcg_rreg) ? \ + adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, flag, hwip) : \ + RREG32(reg)) + #define WREG32_FIELD15(ip, idx, reg, field, val) \ - WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ - (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \ - & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ + (__RREG32_SOC15_RLC__( \ + adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ + 0, ip##_HWIP) & \ + ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ + 0, ip##_HWIP) #define RREG32_SOC15(ip, inst, reg) \ - RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + 0, ip##_HWIP) + +#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP) #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \ - RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + AMDGPU_REGS_NO_KIQ, ip##_HWIP) #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \ - RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset) + __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, 0, ip##_HWIP) #define WREG32_SOC15(ip, inst, reg, value) \ - WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \ + value, 0, ip##_HWIP) + +#define WREG32_SOC15_IP(ip, reg, value) \ + __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP) #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \ - WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value) + __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \ + value, AMDGPU_REGS_NO_KIQ, ip##_HWIP) #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ - WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \ + value, 0, ip##_HWIP) #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \ ({ int ret = 0; \ @@ -77,12 +100,7 @@ }) #define WREG32_RLC(reg, value) \ - do { \ - if (adev->gfx.rlc.funcs->rlcg_wreg) \ - adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \ - else \ - WREG32(reg, value); \ - } while (0) + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP) #define WREG32_RLC_EX(prefix, reg, value) \ do { \ @@ -108,24 +126,19 @@ } \ } while (0) +/* shadow the registers in the callback function */ #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \ - WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP) +/* for GC only */ #define RREG32_RLC(reg) \ - (adev->gfx.rlc.funcs->rlcg_rreg ? 
\ - adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg)) - -#define WREG32_RLC_NO_KIQ(reg, value) \ - do { \ - if (adev->gfx.rlc.funcs->rlcg_wreg) \ - adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \ - else \ - WREG32_NO_KIQ(reg, value); \ - } while (0) + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP) + +#define WREG32_RLC_NO_KIQ(reg, value, hwip) \ + __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) -#define RREG32_RLC_NO_KIQ(reg) \ - (adev->gfx.rlc.funcs->rlcg_rreg ? \ - adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg)) +#define RREG32_RLC_NO_KIQ(reg, hwip) \ + __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip) #define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \ do { \ @@ -146,12 +159,12 @@ } while (0) #define RREG32_SOC15_RLC(ip, inst, reg) \ - RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP) #define WREG32_SOC15_RLC(ip, inst, reg, value) \ do { \ uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\ - WREG32_RLC(target_reg, value); \ + __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \ } while (0) #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \ @@ -161,14 +174,16 @@ } while (0) #define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \ - WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \ - (RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \ - & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \ + (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \ + AMDGPU_REGS_RLC, ip##_HWIP) & \ + ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \ + AMDGPU_REGS_RLC, ip##_HWIP) #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \ - WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value) + __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP) #define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \ - RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)) + __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP) #endif diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 745ed0fba1ed..0f214a398dd8 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -105,6 +105,12 @@ struct ta_ras_trigger_error_input { uint64_t value; // method if error injection. i.e persistent, coherent etc. 
}; +struct ta_ras_init_flags +{ + uint8_t poison_mode_en; + uint8_t dgpu_mode; +}; + struct ta_ras_output_flags { uint8_t ras_init_success_flag; @@ -115,6 +121,7 @@ struct ta_ras_output_flags /* Common input structure for RAS callbacks */ /**********************************************************/ union ta_ras_cmd_input { + struct ta_ras_init_flags init_flags; struct ta_ras_enable_features_input enable_features; struct ta_ras_disable_features_input disable_features; struct ta_ras_trigger_error_input trigger_error; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 249fcbee7871..b08905d1c00f 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -312,8 +312,7 @@ static int tonga_ih_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev, &adev->irq.ih); + amdgpu_irq_fini_sw(adev); amdgpu_irq_remove_domain(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c index 89d20adfa001..af59a35788e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -234,7 +234,7 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev, err_addr &= ~((0x1ULL << lsb) - 1); /* translate umc channel address to soc pa, 3 parts are included */ - retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + retired_page = ADDR_OF_4KB_BLOCK(err_addr) | ADDR_OF_256B_BLOCK(channel_index) | OFFSET_IN_256B_BLOCK(err_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c index 284447d7a579..6c0e91495365 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c @@ -340,7 +340,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev) /* enable VCPU clock */ WREG32(mmUVD_VCPU_CNTL, 1 << 9); - /* disable interupt */ + /* disable interrupt */ WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); #ifdef __BIG_ENDIAN @@ -405,7 +405,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev) return r; } - /* enable interupt */ + /* enable interrupt */ WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1)); WREG32_P(mmUVD_STATUS, 0, ~(1<<2)); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 0cd98fcb1f9f..939bcfa2a4ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -363,6 +363,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unpin(bo); amdgpu_bo_unreserve(bo); amdgpu_bo_unref(&bo); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 8e238dea7bef..90910d19db12 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -25,6 +25,7 @@ */ #include <linux/firmware.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_vce.h" @@ -555,16 +556,19 @@ static int vce_v4_0_hw_fini(void *handle) static int vce_v4_0_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r; + int r, idx; if (adev->vce.vcpu_bo == NULL) return 0; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); - void *ptr = adev->vce.cpu_addr; + if (drm_dev_enter(&adev->ddev, &idx)) { + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); + void 
*ptr = adev->vce.cpu_addr; - memcpy_fromio(adev->vce.saved_bo, ptr, size); + memcpy_fromio(adev->vce.saved_bo, ptr, size); + } + drm_dev_exit(idx); } r = vce_v4_0_hw_fini(adev); @@ -577,16 +581,20 @@ static int vce_v4_0_suspend(void *handle) static int vce_v4_0_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r; + int r, idx; if (adev->vce.vcpu_bo == NULL) return -EINVAL; if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); - void *ptr = adev->vce.cpu_addr; - memcpy_toio(ptr, adev->vce.saved_bo, size); + if (drm_dev_enter(&adev->ddev, &idx)) { + unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo); + void *ptr = adev->vce.cpu_addr; + + memcpy_toio(ptr, adev->vce.saved_bo, size); + drm_dev_exit(idx); + } } else { r = amdgpu_vce_resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 27b1ced145d2..284bb42d6c86 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -769,7 +769,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) } /** - * vcn_v1_0_start - start VCN block + * vcn_v1_0_start_spg_mode - start VCN block * * @adev: amdgpu_device pointer * @@ -1105,7 +1105,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) } /** - * vcn_v1_0_stop - stop VCN block + * vcn_v1_0_stop_spg_mode - stop VCN block * * @adev: amdgpu_device pointer * diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3b23de996db2..47d4f04cbd69 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -34,6 +34,8 @@ #include "vcn/vcn_3_0_0_sh_mask.h" #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" +#include <drm/drm_drv.h> + #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10 @@ -85,16 +87,18 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); static int vcn_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i; if (amdgpu_sriov_vf(adev)) { - adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; + for (i = 0; i < VCN_INSTANCES_SIENNA_CICHLID; i++) + if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) + adev->vcn.num_vcn_inst++; adev->vcn.harvest_config = 0; adev->vcn.num_enc_rings = 1; } else { if (adev->asic_type == CHIP_SIENNA_CICHLID) { u32 harvest; - int i; adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; for (i = 0; i < adev->vcn.num_vcn_inst; i++) { @@ -110,7 +114,10 @@ static int vcn_v3_0_early_init(void *handle) } else adev->vcn.num_vcn_inst = 1; - adev->vcn.num_enc_rings = 2; + if (adev->asic_type == CHIP_BEIGE_GOBY) + adev->vcn.num_enc_rings = 0; + else + adev->vcn.num_enc_rings = 2; } vcn_v3_0_set_dec_ring_funcs(adev); @@ -146,7 +153,8 @@ static int vcn_v3_0_sw_init(void *handle) adev->firmware.fw_size += ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); - if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) { + if ((adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) || + (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)) { adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1; adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw; adev->firmware.fw_size += @@ -268,16 +276,20 @@ static int vcn_v3_0_sw_init(void *handle) static int vcn_v3_0_sw_fini(void *handle) { struct 
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3b23de996db2..47d4f04cbd69 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -34,6 +34,8 @@
 #include "vcn/vcn_3_0_0_sh_mask.h"
 #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
+#include <drm/drm_drv.h>
+
 #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27
 #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f
 #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10
@@ -85,16 +87,18 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
 static int vcn_v3_0_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int i;
     if (amdgpu_sriov_vf(adev)) {
-        adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
+        for (i = 0; i < VCN_INSTANCES_SIENNA_CICHLID; i++)
+            if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i))
+                adev->vcn.num_vcn_inst++;
         adev->vcn.harvest_config = 0;
         adev->vcn.num_enc_rings = 1;
     } else {
         if (adev->asic_type == CHIP_SIENNA_CICHLID) {
             u32 harvest;
-            int i;
             adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
             for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -110,7 +114,10 @@ static int vcn_v3_0_early_init(void *handle)
         } else
             adev->vcn.num_vcn_inst = 1;
-        adev->vcn.num_enc_rings = 2;
+        if (adev->asic_type == CHIP_BEIGE_GOBY)
+            adev->vcn.num_enc_rings = 0;
+        else
+            adev->vcn.num_enc_rings = 2;
     }
     vcn_v3_0_set_dec_ring_funcs(adev);
@@ -146,7 +153,8 @@ static int vcn_v3_0_sw_init(void *handle)
         adev->firmware.fw_size +=
             ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-        if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
+        if ((adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) ||
+            (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)) {
             adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
             adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
             adev->firmware.fw_size +=
@@ -268,16 +276,20 @@ static int vcn_v3_0_sw_init(void *handle)
 static int vcn_v3_0_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    int i, r;
+    int i, r, idx;
-    for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-        volatile struct amdgpu_fw_shared *fw_shared;
+    if (drm_dev_enter(&adev->ddev, &idx)) {
+        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+            volatile struct amdgpu_fw_shared *fw_shared;
-        if (adev->vcn.harvest_config & (1 << i))
-            continue;
-        fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
-        fw_shared->present_flag_0 = 0;
-        fw_shared->sw_ring.is_enabled = false;
+            if (adev->vcn.harvest_config & (1 << i))
+                continue;
+            fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+            fw_shared->present_flag_0 = 0;
+            fw_shared->sw_ring.is_enabled = false;
+        }
+
+        drm_dev_exit(idx);
     }
     if (amdgpu_sriov_vf(adev))
@@ -316,19 +328,17 @@ static int vcn_v3_0_hw_init(void *handle)
                 continue;
             ring = &adev->vcn.inst[i].ring_dec;
-            if (ring->sched.ready) {
-                ring->wptr = 0;
-                ring->wptr_old = 0;
-                vcn_v3_0_dec_ring_set_wptr(ring);
-            }
+            ring->wptr = 0;
+            ring->wptr_old = 0;
+            vcn_v3_0_dec_ring_set_wptr(ring);
+            ring->sched.ready = true;
             for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
                 ring = &adev->vcn.inst[i].ring_enc[j];
-                if (ring->sched.ready) {
-                    ring->wptr = 0;
-                    ring->wptr_old = 0;
-                    vcn_v3_0_enc_ring_set_wptr(ring);
-                }
+                ring->wptr = 0;
+                ring->wptr_old = 0;
+                vcn_v3_0_enc_ring_set_wptr(ring);
+                ring->sched.ready = true;
             }
         }
     } else {
@@ -1254,23 +1264,25 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
         fw_shared->rb.wptr = lower_32_bits(ring->wptr);
         fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-        fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-        ring = &adev->vcn.inst[i].ring_enc[0];
-        WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-        WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-        WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
-        WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-        WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
-        fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
-        fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-        ring = &adev->vcn.inst[i].ring_enc[1];
-        WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-        WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-        WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-        WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-        WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
-        fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+        if (adev->asic_type != CHIP_BEIGE_GOBY) {
+            fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+            ring = &adev->vcn.inst[i].ring_enc[0];
+            WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+            WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+            WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+            WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+            WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+            fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+            fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+            ring = &adev->vcn.inst[i].ring_enc[1];
+            WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+            WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+            WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+            WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+            WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+            fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+        }
     }
     return 0;
@@ -1293,8 +1305,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
     uint32_t table_size;
     uint32_t size, size_dw;
-    bool is_vcn_ready;
-
     struct mmsch_v3_0_cmd_direct_write
         direct_wt = { {0} };
     struct mmsch_v3_0_cmd_direct_read_modify_write
@@ -1486,30 +1496,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
         }
     }
-    /* 6, check each VCN's init_status
-     * if it remains as 0, then this VCN is not assigned to current VF
-     * do not start ring for this VCN
-     */
-    size = sizeof(struct mmsch_v3_0_init_header);
-    table_loc = (uint32_t *)table->cpu_addr;
-    memcpy(&header, (void *)table_loc, size);
-
-    for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
-        if (adev->vcn.harvest_config & (1 << i))
-            continue;
-
-        is_vcn_ready = (header.inst[i].init_status == 1);
-        if (!is_vcn_ready)
-            DRM_INFO("VCN(%d) engine is disabled by hypervisor\n", i);
-
-        ring = &adev->vcn.inst[i].ring_dec;
-        ring->sched.ready = is_vcn_ready;
-        for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-            ring = &adev->vcn.inst[i].ring_enc[j];
-            ring->sched.ready = is_vcn_ready;
-        }
-    }
-
     return 0;
 }
@@ -1650,31 +1636,33 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
                 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
                 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
-            /* Restore */
-            fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
-            fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-            ring = &adev->vcn.inst[inst_idx].ring_enc[0];
-            ring->wptr = 0;
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-            fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
-            fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
-            ring = &adev->vcn.inst[inst_idx].ring_enc[1];
-            ring->wptr = 0;
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-            fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
-
-            /* restore wptr/rptr with pointers saved in FW shared memory*/
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
-            WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+            if (adev->asic_type != CHIP_BEIGE_GOBY) {
+                /* Restore */
+                fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+                fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+                ring = &adev->vcn.inst[inst_idx].ring_enc[0];
+                ring->wptr = 0;
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+                fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+                fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
+                ring = &adev->vcn.inst[inst_idx].ring_enc[1];
+                ring->wptr = 0;
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+                fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
+                /* restore wptr/rptr with pointers saved in FW shared memory*/
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
+                WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+            }
     /* Unstall DPG */
     WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
@@ -2131,7 +2119,8 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
             adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs;
             adev->vcn.inst[i].ring_enc[j].me = i;
         }
-        DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
+        if (adev->vcn.num_enc_rings > 0)
+            DRM_INFO("VCN(%d) encode is enabled in VM mode\n", i);
     }
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index ca8efa5c6978..a9ca6988009e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -104,6 +104,7 @@ static int vega10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
     tmp = RREG32(ih_regs->ih_rb_cntl);
     tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
     /* enable_intr field is only valid in ring0 */
     if (ih == &adev->irq.ih)
         tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -513,11 +514,7 @@ static int vega10_ih_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    amdgpu_irq_fini(adev);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+    amdgpu_irq_fini_sw(adev);
     return 0;
 }
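Note on the vega10_ih hunk above: IH ring-buffer control is updated with the usual read-modify-write pattern, and the change additionally sets RB_GPU_TS_ENABLE whenever the ring is toggled. A minimal sketch of that pattern follows, using only the RREG32/WREG32 and REG_SET_FIELD macros visible in the hunk; the helper itself is illustrative, not driver code.

/* Illustrative read-modify-write of an IH_RB_CNTL register. */
static void ih_rb_cntl_toggle(struct amdgpu_device *adev, u32 ih_rb_cntl_reg,
                              bool enable)
{
    u32 tmp = RREG32(ih_rb_cntl_reg);   /* read the current control value */

    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
    tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
    WREG32(ih_rb_cntl_reg, tmp);        /* write the modified value back */
}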
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 8a122b413bf5..f51dfc38ac65 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -565,11 +565,7 @@ static int vega20_ih_sw_fini(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    amdgpu_irq_fini(adev);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
-    amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+    amdgpu_irq_fini_sw(adev);
     return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 735ebbd1148f..3d21c0799037 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1136,7 +1136,7 @@ static void vi_program_aspm(struct amdgpu_device *adev)
     bool bL1SS = false;
     bool bClkReqSupport = true;
-    if (amdgpu_aspm != 1)
+    if (!amdgpu_aspm)
         return;
     if (adev->flags & AMD_IS_APU ||
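Note on the tonga_ih/vega10_ih/vega20_ih hunks above: each IP's sw_fini callback no longer frees the IRQ state and IH rings by hand; it calls a single amdgpu_irq_fini_sw() helper instead. The sketch below simply restates the per-IP teardown that the deleted lines used to perform; it is not the actual body of the new helper.

/* Restatement of the removed per-IP teardown, for illustration only. */
static void ih_sw_teardown_legacy(struct amdgpu_device *adev)
{
    amdgpu_irq_fini(adev);                          /* software IRQ bookkeeping */
    amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);  /* then every IH ring ...   */
    amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
    amdgpu_ih_ring_fini(adev, &adev->irq.ih);       /* ... down to the primary  */
}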
diff --git a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c b/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c
new file mode 100644
index 000000000000..3d89421275ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "yellow_carp_offset.h"
+
+int yellow_carp_reg_base_init(struct amdgpu_device *adev)
+{
+    /* HW has more IP blocks, only initialized the block needed by driver */
+    uint32_t i;
+    for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+        adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+        adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+        adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+        adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+        adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+        adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+        adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+        adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
+        adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+        adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
+        adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+        adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+        adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+        adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+    }
+    return 0;
+}
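Note on yellow_carp_reg_init.c above: each adev->reg_offset[<IP>_HWIP][instance] entry points at that IP block's table of segment base addresses; register accessors then add a register's per-segment offset to the matching base. A minimal sketch of that lookup, assuming only the reg_offset layout set up above (the function and parameter names are illustrative):

/* Illustrative: resolve an absolute register dword offset from the
 * per-IP, per-instance, per-segment base table plus a relative offset. */
static u32 ip_reg_addr(struct amdgpu_device *adev, u32 hwip, u32 inst,
                       u32 seg, u32 reg)
{
    return adev->reg_offset[hwip][inst][seg] + reg;
}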