Diffstat (limited to 'drivers/platform/x86/amd')
-rw-r--r-- | drivers/platform/x86/amd/Makefile        |   2
-rw-r--r-- | drivers/platform/x86/amd/pmc-quirks.c    | 176
-rw-r--r-- | drivers/platform/x86/amd/pmc.c           | 153
-rw-r--r-- | drivers/platform/x86/amd/pmc.h           |  44
-rw-r--r-- | drivers/platform/x86/amd/pmf/Kconfig     |  11
-rw-r--r-- | drivers/platform/x86/amd/pmf/auto-mode.c | 143
-rw-r--r-- | drivers/platform/x86/amd/pmf/cnqf.c      |  75
-rw-r--r-- | drivers/platform/x86/amd/pmf/core.c      |  45
-rw-r--r-- | drivers/platform/x86/amd/pmf/sps.c       |  55
9 files changed, 645 insertions, 59 deletions
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile index 2c229198e24c..65732f0a3913 100644 --- a/drivers/platform/x86/amd/Makefile +++ b/drivers/platform/x86/amd/Makefile @@ -4,7 +4,7 @@ # AMD x86 Platform-Specific Drivers # -amd-pmc-y := pmc.o +amd-pmc-y := pmc.o pmc-quirks.o obj-$(CONFIG_AMD_PMC) += amd-pmc.o amd_hsmp-y := hsmp.o obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o diff --git a/drivers/platform/x86/amd/pmc-quirks.c b/drivers/platform/x86/amd/pmc-quirks.c new file mode 100644 index 000000000000..362e7c0097d7 --- /dev/null +++ b/drivers/platform/x86/amd/pmc-quirks.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AMD SoC Power Management Controller Driver Quirks + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Mario Limonciello <mario.limonciello@amd.com> + */ + +#include <linux/dmi.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/slab.h> + +#include "pmc.h" + +struct quirk_entry { + u32 s2idle_bug_mmio; +}; + +static struct quirk_entry quirk_s2idle_bug = { + .s2idle_bug_mmio = 0xfed80380, +}; + +static const struct dmi_system_id fwbug_list[] = { + { + .ident = "L14 Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20X5"), + } + }, + { + .ident = "T14s Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20XF"), + } + }, + { + .ident = "X13 Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20XH"), + } + }, + { + .ident = "T14 Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20XK"), + } + }, + { + .ident = "T14 Gen1 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UD"), + } + }, + { + .ident = "T14 Gen1 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UE"), + } + }, + { + .ident = "T14s Gen1 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UH"), + } + }, + { + .ident = "T14s Gen1 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20UJ"), + } + }, + { + .ident = "P14s Gen1 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "20Y1"), + } + }, + { + .ident = "P14s Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21A0"), + } + }, + { + .ident = "P14s Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21A1"), + } + }, + /* https://gitlab.freedesktop.org/drm/amd/-/issues/2684 */ + { + .ident = "HP Laptop 15s-eq2xxx", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Laptop 15s-eq2xxx"), + } + }, + {} +}; + +/* + * Laptops that run a SMI handler during the D3->D0 transition that occurs + * specifically when exiting suspend to idle which can cause + * large delays during resume when the IOMMU translation layer is enabled 
(the default + * behavior) for NVME devices: + * + * To avoid this firmware problem, skip the SMI handler on these machines before the + * D0 transition occurs. + */ +static void amd_pmc_skip_nvme_smi_handler(u32 s2idle_bug_mmio) +{ + struct resource *res; + void __iomem *addr; + u8 val; + + res = request_mem_region_muxed(s2idle_bug_mmio, 1, "amd_pmc_pm80"); + if (!res) + return; + + addr = ioremap(s2idle_bug_mmio, 1); + if (!addr) + goto cleanup_resource; + + val = ioread8(addr); + iowrite8(val & ~BIT(0), addr); + + iounmap(addr); +cleanup_resource: + release_resource(res); + kfree(res); +} + +void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev) +{ + if (dev->quirks && dev->quirks->s2idle_bug_mmio) + amd_pmc_skip_nvme_smi_handler(dev->quirks->s2idle_bug_mmio); +} + +void amd_pmc_quirks_init(struct amd_pmc_dev *dev) +{ + const struct dmi_system_id *dmi_id; + + dmi_id = dmi_first_match(fwbug_list); + if (!dmi_id) + return; + dev->quirks = dmi_id->driver_data; + if (dev->quirks->s2idle_bug_mmio) + pr_info("Using s2idle quirk to avoid %s platform firmware bug\n", + dmi_id->ident); +} diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c index 427905714f79..c1e788b67a74 100644 --- a/drivers/platform/x86/amd/pmc.c +++ b/drivers/platform/x86/amd/pmc.c @@ -28,6 +28,8 @@ #include <linux/seq_file.h> #include <linux/uaccess.h> +#include "pmc.h" + /* SMU communication registers */ #define AMD_PMC_REGISTER_MESSAGE 0x538 #define AMD_PMC_REGISTER_RESPONSE 0x980 @@ -45,7 +47,6 @@ #define AMD_PMC_STB_DUMMY_PC 0xC6000007 /* STB S2D(Spill to DRAM) has different message port offset */ -#define STB_SPILL_TO_DRAM 0xBE #define AMD_S2D_REGISTER_MESSAGE 0xA20 #define AMD_S2D_REGISTER_RESPONSE 0xA80 #define AMD_S2D_REGISTER_ARGUMENT 0xA88 @@ -95,11 +96,11 @@ #define AMD_CPU_ID_CB 0x14D8 #define AMD_CPU_ID_PS 0x14E8 #define AMD_CPU_ID_SP 0x14A4 +#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 #define PMC_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 -#define SOC_SUBSYSTEM_IP_MAX 12 #define DELAY_MIN_US 2000 #define DELAY_MAX_US 3000 #define FIFO_SIZE 4096 @@ -115,6 +116,7 @@ enum s2d_arg { S2D_PHYS_ADDR_LOW, S2D_PHYS_ADDR_HIGH, S2D_NUM_SAMPLES, + S2D_DRAM_SIZE, }; struct amd_pmc_bit_map { @@ -132,32 +134,21 @@ static const struct amd_pmc_bit_map soc15_ip_blk[] = { {"ISP", BIT(6)}, {"NBIO", BIT(7)}, {"DF", BIT(8)}, - {"USB0", BIT(9)}, - {"USB1", BIT(10)}, + {"USB3_0", BIT(9)}, + {"USB3_1", BIT(10)}, {"LAPIC", BIT(11)}, + {"USB3_2", BIT(12)}, + {"USB3_3", BIT(13)}, + {"USB3_4", BIT(14)}, + {"USB4_0", BIT(15)}, + {"USB4_1", BIT(16)}, + {"MPM", BIT(17)}, + {"JPEG", BIT(18)}, + {"IPU", BIT(19)}, + {"UMSCH", BIT(20)}, {} }; -struct amd_pmc_dev { - void __iomem *regbase; - void __iomem *smu_virt_addr; - void __iomem *stb_virt_addr; - void __iomem *fch_virt_addr; - bool msg_port; - u32 base_addr; - u32 cpu_id; - u32 active_ips; -/* SMU version information */ - u8 smu_program; - u8 major; - u8 minor; - u8 rev; - struct device *dev; - struct pci_dev *rdev; - struct mutex lock; /* generic mutex lock */ - struct dentry *dbgfs_dir; -}; - static bool enable_stb; module_param(enable_stb, bool, 0644); MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism"); @@ -194,8 +185,8 @@ struct smu_metrics { u64 timein_s0i3_totaltime; u64 timein_swdrips_lastcapture; u64 timein_swdrips_totaltime; - u64 timecondition_notmet_lastcapture[SOC_SUBSYSTEM_IP_MAX]; - u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX]; + u64 timecondition_notmet_lastcapture[32]; + u64 
timecondition_notmet_totaltime[32]; } __packed; static int amd_pmc_stb_debugfs_open(struct inode *inode, struct file *filp) @@ -261,7 +252,7 @@ static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp) dev->msg_port = 1; /* Get the num_samples to calculate the last push location */ - ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, STB_SPILL_TO_DRAM, 1); + ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, dev->s2d_msg_id, true); /* Clear msg_port for other SMU operation */ dev->msg_port = 0; if (ret) { @@ -308,6 +299,23 @@ static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = { .release = amd_pmc_stb_debugfs_release_v2, }; +static void amd_pmc_get_ip_info(struct amd_pmc_dev *dev) +{ + switch (dev->cpu_id) { + case AMD_CPU_ID_PCO: + case AMD_CPU_ID_RN: + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + dev->num_ips = 12; + dev->s2d_msg_id = 0xBE; + break; + case AMD_CPU_ID_PS: + dev->num_ips = 21; + dev->s2d_msg_id = 0x85; + break; + } +} + static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) { if (dev->cpu_id == AMD_CPU_ID_PCO) { @@ -317,15 +325,15 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) /* Get Active devices list from SMU */ if (!dev->active_ips) - amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, 1); + amd_pmc_send_cmd(dev, 0, &dev->active_ips, SMU_MSG_GET_SUP_CONSTRAINTS, true); /* Get dram address */ if (!dev->smu_virt_addr) { u32 phys_addr_low, phys_addr_hi; u64 smu_phys_addr; - amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, 1); - amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, 1); + amd_pmc_send_cmd(dev, 0, &phys_addr_low, SMU_MSG_LOG_GETDRAM_ADDR_LO, true); + amd_pmc_send_cmd(dev, 0, &phys_addr_hi, SMU_MSG_LOG_GETDRAM_ADDR_HI, true); smu_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); dev->smu_virt_addr = devm_ioremap(dev->dev, smu_phys_addr, @@ -335,8 +343,8 @@ static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) } /* Start the logging */ - amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, 0); - amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, 0); + amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_RESET, false); + amd_pmc_send_cmd(dev, 0, NULL, SMU_MSG_LOG_START, false); return 0; } @@ -377,7 +385,7 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev) if (dev->cpu_id == AMD_CPU_ID_PCO) return -ENODEV; - rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1); + rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, true); if (rc) return rc; @@ -469,7 +477,7 @@ static int smu_fw_info_show(struct seq_file *s, void *unused) table.timeto_resume_to_os_lastcapture); seq_puts(s, "\n=== Active time (in us) ===\n"); - for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) { + for (idx = 0 ; idx < dev->num_ips ; idx++) { if (soc15_ip_blk[idx].bit_mask & dev->active_ips) seq_printf(s, "%-8s : %lld\n", soc15_ip_blk[idx].name, table.timecondition_notmet_lastcapture[idx]); @@ -543,7 +551,7 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev, } if (dev) - dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val); + pm_pr_dbg("SMU idlemask s0i3: 0x%x\n", val); if (s) seq_printf(s, "SMU idlemask : 0x%x\n", val); @@ -562,6 +570,18 @@ static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev) debugfs_remove_recursive(dev->dbgfs_dir); } +static bool amd_pmc_is_stb_supported(struct amd_pmc_dev *dev) +{ + switch (dev->cpu_id) { + case AMD_CPU_ID_YC: + case AMD_CPU_ID_CB: + case AMD_CPU_ID_PS: + return 
true; + default: + return false; + } +} + static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) { dev->dbgfs_dir = debugfs_create_dir("amd_pmc", NULL); @@ -573,8 +593,7 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev) &amd_pmc_idlemask_fops); /* Enable STB only when the module_param is set */ if (enable_stb) { - if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB || - dev->cpu_id == AMD_CPU_ID_PS) + if (amd_pmc_is_stb_supported(dev)) debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev, &amd_pmc_stb_debugfs_fops_v2); else @@ -769,7 +788,7 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) *arg |= (duration << 16); rc = rtc_alarm_irq_enable(rtc_device, 0); - dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration); + pm_pr_dbg("wakeup timer programmed for %lld seconds\n", duration); return rc; } @@ -794,7 +813,7 @@ static void amd_pmc_s2idle_prepare(void) } msg = amd_pmc_get_os_hint(pdev); - rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0); + rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, false); if (rc) { dev_err(pdev->dev, "suspend failed: %d\n", rc); return; @@ -829,7 +848,7 @@ static int amd_pmc_dump_data(struct amd_pmc_dev *pdev) if (pdev->cpu_id == AMD_CPU_ID_PCO) return -ENODEV; - return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0); + return amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, false); } static void amd_pmc_s2idle_restore(void) @@ -839,7 +858,7 @@ static void amd_pmc_s2idle_restore(void) u8 msg; msg = amd_pmc_get_os_hint(pdev); - rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0); + rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, false); if (rc) dev_err(pdev->dev, "resume failed: %d\n", rc); @@ -852,6 +871,8 @@ static void amd_pmc_s2idle_restore(void) /* Notify on failed entry */ amd_pmc_validate_deepest(pdev); + + amd_pmc_process_restore_quirks(pdev); } static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = { @@ -887,32 +908,69 @@ static const struct pci_device_id pmc_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PCO) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RV) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_SP) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, { } }; +static int amd_pmc_get_dram_size(struct amd_pmc_dev *dev) +{ + int ret; + + switch (dev->cpu_id) { + case AMD_CPU_ID_YC: + if (!(dev->major > 90 || (dev->major == 90 && dev->minor > 39))) { + ret = -EINVAL; + goto err_dram_size; + } + break; + default: + ret = -EINVAL; + goto err_dram_size; + } + + ret = amd_pmc_send_cmd(dev, S2D_DRAM_SIZE, &dev->dram_size, dev->s2d_msg_id, true); + if (ret || !dev->dram_size) + goto err_dram_size; + + return 0; + +err_dram_size: + dev_err(dev->dev, "DRAM size command not supported for this platform\n"); + return ret; +} + static int amd_pmc_s2d_init(struct amd_pmc_dev *dev) { u32 phys_addr_low, phys_addr_hi; u64 stb_phys_addr; u32 size = 0; + int ret; /* Spill to DRAM feature uses separate SMU message port */ dev->msg_port = 1; - amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, STB_SPILL_TO_DRAM, 1); + /* Get num of IP blocks within the SoC */ + amd_pmc_get_ip_info(dev); + + amd_pmc_send_cmd(dev, S2D_TELEMETRY_SIZE, &size, dev->s2d_msg_id, true); if (size != S2D_TELEMETRY_BYTES_MAX) return -EIO; + /* Get DRAM size */ + ret = amd_pmc_get_dram_size(dev); + if (ret) + dev->dram_size = S2D_TELEMETRY_DRAMBYTES_MAX; + /* Get STB DRAM address */ - amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, STB_SPILL_TO_DRAM, 1); - amd_pmc_send_cmd(dev, 
S2D_PHYS_ADDR_HIGH, &phys_addr_hi, STB_SPILL_TO_DRAM, 1); + amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_LOW, &phys_addr_low, dev->s2d_msg_id, true); + amd_pmc_send_cmd(dev, S2D_PHYS_ADDR_HIGH, &phys_addr_hi, dev->s2d_msg_id, true); stb_phys_addr = ((u64)phys_addr_hi << 32 | phys_addr_low); /* Clear msg_port for other SMU operation */ dev->msg_port = 0; - dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, S2D_TELEMETRY_DRAMBYTES_MAX); + dev->stb_virt_addr = devm_ioremap(dev->dev, stb_phys_addr, dev->dram_size); if (!dev->stb_virt_addr) return -ENOMEM; @@ -1001,7 +1059,7 @@ static int amd_pmc_probe(struct platform_device *pdev) mutex_init(&dev->lock); - if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) { + if (enable_stb && amd_pmc_is_stb_supported(dev)) { err = amd_pmc_s2d_init(dev); if (err) goto err_pci_dev_put; @@ -1012,6 +1070,8 @@ static int amd_pmc_probe(struct platform_device *pdev) err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops); if (err) dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n"); + if (!disable_workarounds) + amd_pmc_quirks_init(dev); } amd_pmc_dbgfs_register(dev); @@ -1040,6 +1100,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = { {"AMDI0007", 0}, {"AMDI0008", 0}, {"AMDI0009", 0}, + {"AMDI000A", 0}, {"AMD0004", 0}, {"AMD0005", 0}, { } diff --git a/drivers/platform/x86/amd/pmc.h b/drivers/platform/x86/amd/pmc.h new file mode 100644 index 000000000000..c27bd6a5642f --- /dev/null +++ b/drivers/platform/x86/amd/pmc.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * AMD SoC Power Management Controller Driver + * + * Copyright (c) 2023, Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Author: Mario Limonciello <mario.limonciello@amd.com> + */ + +#ifndef PMC_H +#define PMC_H + +#include <linux/types.h> +#include <linux/mutex.h> + +struct amd_pmc_dev { + void __iomem *regbase; + void __iomem *smu_virt_addr; + void __iomem *stb_virt_addr; + void __iomem *fch_virt_addr; + bool msg_port; + u32 base_addr; + u32 cpu_id; + u32 active_ips; + u32 dram_size; + u32 num_ips; + u32 s2d_msg_id; +/* SMU version information */ + u8 smu_program; + u8 major; + u8 minor; + u8 rev; + struct device *dev; + struct pci_dev *rdev; + struct mutex lock; /* generic mutex lock */ + struct dentry *dbgfs_dir; + struct quirk_entry *quirks; +}; + +void amd_pmc_process_restore_quirks(struct amd_pmc_dev *dev); +void amd_pmc_quirks_init(struct amd_pmc_dev *dev); + +#endif /* PMC_H */ diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig index d87986adf91e..3064bc8ea167 100644 --- a/drivers/platform/x86/amd/pmf/Kconfig +++ b/drivers/platform/x86/amd/pmf/Kconfig @@ -16,3 +16,14 @@ config AMD_PMF To compile this driver as a module, choose M here: the module will be called amd_pmf. + +config AMD_PMF_DEBUG + bool "PMF debug information" + depends on AMD_PMF + help + Enabling this option would give more debug information on the OEM fed + power setting values for each of the PMF feature. PMF driver gets this + information after evaluating a ACPI method and the information is stored + in the PMF config store. + + Say Y here to enable more debug logs and Say N here if you are not sure. 
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c index 96a8e1832c05..02ff68be10d0 100644 --- a/drivers/platform/x86/amd/pmf/auto-mode.c +++ b/drivers/platform/x86/amd/pmf/auto-mode.c @@ -15,6 +15,100 @@ static struct auto_mode_mode_config config_store; static const char *state_as_str(unsigned int state); +#ifdef CONFIG_AMD_PMF_DEBUG +static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data) +{ + struct auto_mode_mode_settings *its_mode; + + pr_debug("Auto Mode Data - BEGIN\n"); + + /* time constant */ + pr_debug("balanced_to_perf: %u ms\n", + data->transition[AUTO_TRANSITION_TO_PERFORMANCE].time_constant); + pr_debug("perf_to_balanced: %u ms\n", + data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].time_constant); + pr_debug("quiet_to_balanced: %u ms\n", + data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].time_constant); + pr_debug("balanced_to_quiet: %u ms\n", + data->transition[AUTO_TRANSITION_TO_QUIET].time_constant); + + /* power floor */ + pr_debug("pfloor_perf: %u mW\n", data->mode_set[AUTO_PERFORMANCE].power_floor); + pr_debug("pfloor_balanced: %u mW\n", data->mode_set[AUTO_BALANCE].power_floor); + pr_debug("pfloor_quiet: %u mW\n", data->mode_set[AUTO_QUIET].power_floor); + + /* Power delta for mode change */ + pr_debug("pd_balanced_to_perf: %u mW\n", + data->transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta); + pr_debug("pd_perf_to_balanced: %u mW\n", + data->transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta); + pr_debug("pd_quiet_to_balanced: %u mW\n", + data->transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta); + pr_debug("pd_balanced_to_quiet: %u mW\n", + data->transition[AUTO_TRANSITION_TO_QUIET].power_delta); + + /* skin temperature limits */ + its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP]; + pr_debug("stt_apu_perf_on_lap: %u C\n", + its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_perf_on_lap: %u C\n", + its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_perf_on_lap: %u mW\n", its_mode->power_control.stt_min); + + its_mode = &data->mode_set[AUTO_PERFORMANCE]; + pr_debug("stt_apu_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_perf: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_perf: %u mW\n", its_mode->power_control.stt_min); + + its_mode = &data->mode_set[AUTO_BALANCE]; + pr_debug("stt_apu_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_balanced: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_balanced: %u mW\n", its_mode->power_control.stt_min); + + its_mode = &data->mode_set[AUTO_QUIET]; + pr_debug("stt_apu_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_APU]); + pr_debug("stt_hs2_quiet: %u C\n", its_mode->power_control.stt_skin_temp[STT_TEMP_HS2]); + pr_debug("stt_min_limit_quiet: %u mW\n", its_mode->power_control.stt_min); + + /* SPL based power limits */ + its_mode = &data->mode_set[AUTO_PERFORMANCE_ON_LAP]; + pr_debug("fppt_perf_on_lap: %u mW\n", its_mode->power_control.fppt); + pr_debug("sppt_perf_on_lap: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_perf_on_lap: %u mW\n", its_mode->power_control.spl); + pr_debug("sppt_apu_only_perf_on_lap: %u mW\n", its_mode->power_control.sppt_apu_only); + + its_mode = &data->mode_set[AUTO_PERFORMANCE]; + pr_debug("fppt_perf: %u mW\n", 
its_mode->power_control.fppt); + pr_debug("sppt_perf: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_perf: %u mW\n", its_mode->power_control.spl); + pr_debug("sppt_apu_only_perf: %u mW\n", its_mode->power_control.sppt_apu_only); + + its_mode = &data->mode_set[AUTO_BALANCE]; + pr_debug("fppt_balanced: %u mW\n", its_mode->power_control.fppt); + pr_debug("sppt_balanced: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_balanced: %u mW\n", its_mode->power_control.spl); + pr_debug("sppt_apu_only_balanced: %u mW\n", its_mode->power_control.sppt_apu_only); + + its_mode = &data->mode_set[AUTO_QUIET]; + pr_debug("fppt_quiet: %u mW\n", its_mode->power_control.fppt); + pr_debug("sppt_quiet: %u mW\n", its_mode->power_control.sppt); + pr_debug("spl_quiet: %u mW\n", its_mode->power_control.spl); + pr_debug("sppt_apu_only_quiet: %u mW\n", its_mode->power_control.sppt_apu_only); + + /* Fan ID */ + pr_debug("fan_id_perf: %lu\n", + data->mode_set[AUTO_PERFORMANCE].fan_control.fan_id); + pr_debug("fan_id_balanced: %lu\n", + data->mode_set[AUTO_BALANCE].fan_control.fan_id); + pr_debug("fan_id_quiet: %lu\n", + data->mode_set[AUTO_QUIET].fan_control.fan_id); + + pr_debug("Auto Mode Data - END\n"); +} +#else +static void amd_pmf_dump_auto_mode_defaults(struct auto_mode_mode_config *data) {} +#endif + static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx, struct auto_mode_mode_config *table) { @@ -85,11 +179,34 @@ void amd_pmf_trans_automode(struct amd_pmf_dev *dev, int socket_power, ktime_t t config_store.transition[i].applied = false; update = true; } + +#ifdef CONFIG_AMD_PMF_DEBUG + dev_dbg(dev->dev, "[AUTO MODE] average_power : %d mW mode: %s\n", avg_power, + state_as_str(config_store.current_mode)); + + dev_dbg(dev->dev, "[AUTO MODE] time: %lld ms timer: %u ms tc: %u ms\n", + time_elapsed_ms, config_store.transition[i].timer, + config_store.transition[i].time_constant); + + dev_dbg(dev->dev, "[AUTO MODE] shiftup: %u pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[i].shifting_up, + config_store.transition[i].power_threshold, + config_store.mode_set[i].power_floor, + config_store.transition[i].power_delta); +#endif } dev_dbg(dev->dev, "[AUTO_MODE] avg power: %u mW mode: %s\n", avg_power, state_as_str(config_store.current_mode)); +#ifdef CONFIG_AMD_PMF_DEBUG + dev_dbg(dev->dev, "[AUTO MODE] priority1: %u priority2: %u priority3: %u priority4: %u\n", + config_store.transition[0].applied, + config_store.transition[1].applied, + config_store.transition[2].applied, + config_store.transition[3].applied); +#endif + if (update) { for (j = 0; j < AUTO_TRANSITION_MAX; j++) { /* Apply the mode with highest priority indentified */ @@ -140,6 +257,30 @@ static void amd_pmf_get_power_threshold(void) config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_threshold = config_store.mode_set[AUTO_PERFORMANCE].power_floor - config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta; + +#ifdef CONFIG_AMD_PMF_DEBUG + pr_debug("[AUTO MODE TO_QUIET] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_threshold, + config_store.mode_set[AUTO_BALANCE].power_floor, + config_store.transition[AUTO_TRANSITION_TO_QUIET].power_delta); + + pr_debug("[AUTO MODE TO_PERFORMANCE] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_threshold, + config_store.mode_set[AUTO_BALANCE].power_floor, + config_store.transition[AUTO_TRANSITION_TO_PERFORMANCE].power_delta); + + pr_debug("[AUTO 
MODE QUIET_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE] + .power_threshold, + config_store.mode_set[AUTO_QUIET].power_floor, + config_store.transition[AUTO_TRANSITION_FROM_QUIET_TO_BALANCE].power_delta); + + pr_debug("[AUTO MODE PERFORMANCE_TO_BALANCE] pt: %u mW pf: %u mW pd: %u mW\n", + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE] + .power_threshold, + config_store.mode_set[AUTO_PERFORMANCE].power_floor, + config_store.transition[AUTO_TRANSITION_FROM_PERFORMANCE_TO_BALANCE].power_delta); +#endif } static const char *state_as_str(unsigned int state) @@ -262,6 +403,8 @@ static void amd_pmf_load_defaults_auto_mode(struct amd_pmf_dev *dev) /* set to initial default values */ config_store.current_mode = AUTO_BALANCE; dev->socket_power_history_idx = -1; + + amd_pmf_dump_auto_mode_defaults(&config_store); } int amd_pmf_reset_amt(struct amd_pmf_dev *dev) diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c index 4beb22a19466..539b186e9027 100644 --- a/drivers/platform/x86/amd/pmf/cnqf.c +++ b/drivers/platform/x86/amd/pmf/cnqf.c @@ -13,6 +13,61 @@ static struct cnqf_config config_store; +#ifdef CONFIG_AMD_PMF_DEBUG +static const char *state_as_str_cnqf(unsigned int state) +{ + switch (state) { + case APMF_CNQF_TURBO: + return "turbo"; + case APMF_CNQF_PERFORMANCE: + return "performance"; + case APMF_CNQF_BALANCE: + return "balance"; + case APMF_CNQF_QUIET: + return "quiet"; + default: + return "Unknown CnQF State"; + } +} + +static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx) +{ + int i; + + pr_debug("Dynamic Slider %s Defaults - BEGIN\n", idx ? "DC" : "AC"); + pr_debug("size: %u\n", data->size); + pr_debug("flags: 0x%x\n", data->flags); + + /* Time constants */ + pr_debug("t_perf_to_turbo: %u ms\n", data->t_perf_to_turbo); + pr_debug("t_balanced_to_perf: %u ms\n", data->t_balanced_to_perf); + pr_debug("t_quiet_to_balanced: %u ms\n", data->t_quiet_to_balanced); + pr_debug("t_balanced_to_quiet: %u ms\n", data->t_balanced_to_quiet); + pr_debug("t_perf_to_balanced: %u ms\n", data->t_perf_to_balanced); + pr_debug("t_turbo_to_perf: %u ms\n", data->t_turbo_to_perf); + + for (i = 0 ; i < CNQF_MODE_MAX ; i++) { + pr_debug("pfloor_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].pfloor); + pr_debug("fppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].fppt); + pr_debug("sppt_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].sppt); + pr_debug("sppt_apuonly_%s: %u mW\n", + state_as_str_cnqf(i), data->ps[i].sppt_apu_only); + pr_debug("spl_%s: %u mW\n", state_as_str_cnqf(i), data->ps[i].spl); + pr_debug("stt_minlimit_%s: %u mW\n", + state_as_str_cnqf(i), data->ps[i].stt_min_limit); + pr_debug("stt_skintemp_apu_%s: %u C\n", state_as_str_cnqf(i), + data->ps[i].stt_skintemp[STT_TEMP_APU]); + pr_debug("stt_skintemp_hs2_%s: %u C\n", state_as_str_cnqf(i), + data->ps[i].stt_skintemp[STT_TEMP_HS2]); + pr_debug("fan_id_%s: %u\n", state_as_str_cnqf(i), data->ps[i].fan_id); + } + + pr_debug("Dynamic Slider %s Defaults - END\n", idx ? 
"DC" : "AC"); +} +#else +static void amd_pmf_cnqf_dump_defaults(struct apmf_dyn_slider_output *data, int idx) {} +#endif + static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx, struct cnqf_config *table) { @@ -120,6 +175,13 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l config_store.trans_param[src][i].count++; tp = &config_store.trans_param[src][i]; + +#ifdef CONFIG_AMD_PMF_DEBUG + dev_dbg(dev->dev, "avg_power: %u mW total_power: %u mW count: %u timer: %u ms\n", + avg_power, config_store.trans_param[src][i].total_power, + config_store.trans_param[src][i].count, + config_store.trans_param[src][i].timer); +#endif if (tp->timer >= tp->time_constant && tp->count) { avg_power = tp->total_power / tp->count; @@ -140,6 +202,18 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l dev_dbg(dev->dev, "[CNQF] Avg power: %u mW socket power: %u mW mode:%s\n", avg_power, socket_power, state_as_str(config_store.current_mode)); +#ifdef CONFIG_AMD_PMF_DEBUG + dev_dbg(dev->dev, "[CNQF] priority1: %u priority2: %u priority3: %u\n", + config_store.trans_param[src][0].priority, + config_store.trans_param[src][1].priority, + config_store.trans_param[src][2].priority); + + dev_dbg(dev->dev, "[CNQF] priority4: %u priority5: %u priority6: %u\n", + config_store.trans_param[src][3].priority, + config_store.trans_param[src][4].priority, + config_store.trans_param[src][5].priority); +#endif + for (j = 0; j < CNQF_TRANSITION_MAX; j++) { /* apply the highest priority */ if (config_store.trans_param[src][j].priority) { @@ -284,6 +358,7 @@ static int amd_pmf_load_defaults_cnqf(struct amd_pmf_dev *dev) return ret; } + amd_pmf_cnqf_dump_defaults(&out, i); amd_pmf_update_mode_set(i, &out); amd_pmf_update_trans_data(i, &out); amd_pmf_update_power_threshold(i); diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index d5bb775dadcf..d8732557f9db 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -40,6 +40,7 @@ /* List of supported CPU ids */ #define AMD_CPU_ID_RMB 0x14b5 #define AMD_CPU_ID_PS 0x14e8 +#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507 #define PMF_MSG_DELAY_MIN_US 50 #define RESPONSE_REGISTER_LOOP_MAX 20000 @@ -242,27 +243,33 @@ out_unlock: static const struct pci_device_id pmf_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) }, { } }; -int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) +static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev) { u64 phys_addr; u32 hi, low; - INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics); + phys_addr = virt_to_phys(dev->buf); + hi = phys_addr >> 32; + low = phys_addr & GENMASK(31, 0); + + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); +} +int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) +{ /* Get Metrics Table Address */ dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL); if (!dev->buf) return -ENOMEM; - phys_addr = virt_to_phys(dev->buf); - hi = phys_addr >> 32; - low = phys_addr & GENMASK(31, 0); + INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); + amd_pmf_set_dram_addr(dev); /* * Start collecting the metrics data after a small delay @@ -273,6 +280,18 @@ int 
amd_pmf_init_metrics_table(struct amd_pmf_dev *dev) return 0; } +static int amd_pmf_resume_handler(struct device *dev) +{ + struct amd_pmf_dev *pdev = dev_get_drvdata(dev); + + if (pdev->buf) + amd_pmf_set_dram_addr(pdev); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler); + static void amd_pmf_init_features(struct amd_pmf_dev *dev) { int ret; @@ -280,6 +299,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev) /* Enable Static Slider */ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { amd_pmf_init_sps(dev); + dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; + power_supply_reg_notifier(&dev->pwr_src_notifier); dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n"); } @@ -298,8 +319,10 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev) static void amd_pmf_deinit_features(struct amd_pmf_dev *dev) { - if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) + if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) { + power_supply_unreg_notifier(&dev->pwr_src_notifier); amd_pmf_deinit_sps(dev); + } if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) { amd_pmf_deinit_auto_mode(dev); @@ -312,6 +335,7 @@ static void amd_pmf_deinit_features(struct amd_pmf_dev *dev) static const struct acpi_device_id amd_pmf_acpi_ids[] = { {"AMDI0100", 0x100}, {"AMDI0102", 0}, + {"AMDI0103", 0}, { } }; MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids); @@ -382,9 +406,6 @@ static int amd_pmf_probe(struct platform_device *pdev) apmf_install_handler(dev); amd_pmf_dbgfs_register(dev); - dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call; - power_supply_reg_notifier(&dev->pwr_src_notifier); - dev_info(dev->dev, "registered PMF device successfully\n"); return 0; @@ -394,7 +415,6 @@ static void amd_pmf_remove(struct platform_device *pdev) { struct amd_pmf_dev *dev = platform_get_drvdata(pdev); - power_supply_unreg_notifier(&dev->pwr_src_notifier); amd_pmf_deinit_features(dev); apmf_acpi_deinit(dev); amd_pmf_dbgfs_unregister(dev); @@ -413,6 +433,7 @@ static struct platform_driver amd_pmf_driver = { .name = "amd-pmf", .acpi_match_table = amd_pmf_acpi_ids, .dev_groups = amd_pmf_driver_groups, + .pm = pm_sleep_ptr(&amd_pmf_pm), }, .probe = amd_pmf_probe, .remove_new = amd_pmf_remove, diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c index bed762d47a14..445ff053b4df 100644 --- a/drivers/platform/x86/amd/pmf/sps.c +++ b/drivers/platform/x86/amd/pmf/sps.c @@ -12,6 +12,60 @@ static struct amd_pmf_static_slider_granular config_store; +#ifdef CONFIG_AMD_PMF_DEBUG +static const char *slider_as_str(unsigned int state) +{ + switch (state) { + case POWER_MODE_PERFORMANCE: + return "PERFORMANCE"; + case POWER_MODE_BALANCED_POWER: + return "BALANCED_POWER"; + case POWER_MODE_POWER_SAVER: + return "POWER_SAVER"; + default: + return "Unknown Slider State"; + } +} + +static const char *source_as_str(unsigned int state) +{ + switch (state) { + case POWER_SOURCE_AC: + return "AC"; + case POWER_SOURCE_DC: + return "DC"; + default: + return "Unknown Power State"; + } +} + +static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) +{ + int i, j; + + pr_debug("Static Slider Data - BEGIN\n"); + + for (i = 0; i < POWER_SOURCE_MAX; i++) { + for (j = 0; j < POWER_MODE_MAX; j++) { + pr_debug("--- Source:%s Mode:%s ---\n", source_as_str(i), slider_as_str(j)); + pr_debug("SPL: %u mW\n", data->prop[i][j].spl); + pr_debug("SPPT: %u mW\n", 
data->prop[i][j].sppt); + pr_debug("SPPT_ApuOnly: %u mW\n", data->prop[i][j].sppt_apu_only); + pr_debug("FPPT: %u mW\n", data->prop[i][j].fppt); + pr_debug("STTMinLimit: %u mW\n", data->prop[i][j].stt_min); + pr_debug("STT_SkinTempLimit_APU: %u C\n", + data->prop[i][j].stt_skin_temp[STT_TEMP_APU]); + pr_debug("STT_SkinTempLimit_HS2: %u C\n", + data->prop[i][j].stt_skin_temp[STT_TEMP_HS2]); + } + } + + pr_debug("Static Slider Data - END\n"); +} +#else +static void amd_pmf_dump_sps_defaults(struct amd_pmf_static_slider_granular *data) {} +#endif + static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev) { struct apmf_static_slider_granular_output output; @@ -36,6 +90,7 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev) idx++; } } + amd_pmf_dump_sps_defaults(&config_store); } void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, |